Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2017/07/19 22:34:53 UTC
[01/50] [abbrv] hadoop git commit: HDFS-12105. Ozone: listVolumes
doesn't work from ozone commandline. Contributed by Yiqun Lin.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-7240 a715f60ce -> b3a7f3b2d
HDFS-12105. Ozone: listVolumes doesn't work from ozone commandline. Contributed by Yiqun Lin.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef9ba833
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef9ba833
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef9ba833
Branch: refs/heads/HDFS-7240
Commit: ef9ba8332c90497f4d5383d7661ba3f03c874d6d
Parents: 6798e6d
Author: Weiwei Yang <ww...@apache.org>
Authored: Mon Jul 10 10:24:22 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 12 17:11:46 2017 -0700
----------------------------------------------------------------------
.../ozone/web/client/OzoneRestClient.java | 60 ++++++++++++++-----
.../web/ozShell/volume/ListVolumeHandler.java | 34 +++++++++--
.../hadoop/ozone/web/client/TestVolume.java | 62 +++++++++++++++++++-
3 files changed, 132 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9ba833/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
index 7c144ad..ebb824a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
@@ -204,24 +204,26 @@ public class OzoneRestClient implements Closeable {
* List all the volumes owned by the user or Owned by the user specified in
* the behalf of string.
*
- * @param onBehalfOf - User Name of the user if it is not the caller. for
- * example, an admin wants to list some other users
- * volumes.
- * @param prefix - Return only volumes that match this prefix.
- * @param maxKeys - Maximum number of results to return, if the result set
- * is smaller than requested size, it means that list is
- * complete.
- * @param prevKey - The last key that client got, server will continue
- * returning results from that point.
+ * @param onBehalfOf
+ * User name of the user if it is not the caller; for example,
+ * an admin wants to list some other user's volumes.
+ * @param prefix
+ * Return only volumes that match this prefix.
+ * @param maxKeys
+ * Maximum number of results to return; if the result set
+ * is smaller than the requested size, the list is
+ * complete.
+ * @param startVolume
+ * The previous volume name.
* @return List of Volumes
* @throws OzoneException
*/
- public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix, int
- maxKeys, OzoneVolume prevKey) throws OzoneException {
+ public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix,
+ int maxKeys, String startVolume) throws OzoneException {
HttpGet httpGet = null;
try (CloseableHttpClient httpClient = newHttpClient()) {
URIBuilder builder = new URIBuilder(endPointURI);
- if (prefix != null) {
+ if (!Strings.isNullOrEmpty(prefix)) {
builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);
}
@@ -230,9 +232,9 @@ public class OzoneRestClient implements Closeable {
.toString(maxKeys));
}
- if (prevKey != null) {
+ if (!Strings.isNullOrEmpty(startVolume)) {
builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY,
- prevKey.getOwnerName() + "/" + prevKey.getVolumeName());
+ startVolume);
}
builder.setPath("/").build();
@@ -250,6 +252,33 @@ public class OzoneRestClient implements Closeable {
}
/**
+ * List all the volumes owned by the user, or owned by the user specified
+ * in the on-behalf-of string.
+ *
+ * @param onBehalfOf - User name of the user if it is not the caller; for
+ * example, an admin wants to list some other user's
+ * volumes.
+ * @param prefix - Return only volumes that match this prefix.
+ * @param maxKeys - Maximum number of results to return; if the result set
+ * is smaller than the requested size, the list is
+ * complete.
+ * @param prevKey - The last key that the client got; the server will
+ * continue returning results from that point.
+ * @return List of Volumes
+ * @throws OzoneException
+ */
+ public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix,
+ int maxKeys, OzoneVolume prevKey) throws OzoneException {
+ String volumeName = null;
+
+ if (prevKey != null) {
+ volumeName = prevKey.getVolumeName();
+ }
+
+ return listVolumes(onBehalfOf, prefix, maxKeys, volumeName);
+ }
+
+ /**
* List volumes of the current user or if onBehalfof is not null lists volume
* owned by that user. You need admin privilege to read other users volume
* lists.
@@ -260,7 +289,8 @@ public class OzoneRestClient implements Closeable {
*/
public List<OzoneVolume> listVolumes(String onBehalfOf)
throws OzoneException {
- return listVolumes(onBehalfOf, null, 1000, null);
+ return listVolumes(onBehalfOf, null,
+ Integer.parseInt(Header.OZONE_DEFAULT_LIST_SIZE), StringUtils.EMPTY);
}
/**
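The new String-based overload makes paginated listing straightforward from
client code. A minimal usage sketch, assuming an already-configured
OzoneRestClient named "client"; only the
listVolumes(String, String, int, String) signature, setUserAuth and
getVolumeName come from this patch, while the loop structure and names are
illustrative:

    // Page through all of user "bilbo"'s volumes, 100 at a time.
    client.setUserAuth("hdfs");
    String start = "";                  // empty start key lists from the beginning
    List<OzoneVolume> page;
    do {
      page = client.listVolumes("bilbo", null, 100, start);
      for (OzoneVolume vol : page) {
        System.out.println(vol.getVolumeName());
      }
      if (!page.isEmpty()) {
        // resume from the last volume seen on the next request
        start = page.get(page.size() - 1).getVolumeName();
      }
    } while (page.size() == 100);       // a short page means the list is complete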
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9ba833/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
index 3c8c7cb..84a4451 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
@@ -55,6 +55,32 @@ public class ListVolumeHandler extends Handler {
"Incorrect call : listVolume is missing");
}
+ int maxKeys = 0;
+ if (cmd.hasOption(Shell.LIST_LENGTH)) {
+ String length = cmd.getOptionValue(Shell.LIST_LENGTH);
+ try {
+ maxKeys = Integer.parseInt(length);
+ } catch (NumberFormatException nfe) {
+ throw new OzoneRestClientException(
+ "Invalid max key length, the vaule should be digital.");
+ }
+
+ if (maxKeys <= 0) {
+ throw new OzoneRestClientException(
+ "Invalid max key length, the vaule should be a positive number.");
+ }
+ }
+
+ String startVolume = null;
+ if (cmd.hasOption(Shell.START)) {
+ startVolume = cmd.getOptionValue(Shell.START);
+ }
+
+ String prefix = null;
+ if (cmd.hasOption(Shell.PREFIX)) {
+ prefix = cmd.getOptionValue(Shell.PREFIX);
+ }
+
String ozoneURIString = cmd.getOptionValue(Shell.LIST_VOLUME);
URI ozoneURI = verifyURI(ozoneURIString);
@@ -62,11 +88,6 @@ public class ListVolumeHandler extends Handler {
rootName = "hdfs";
}
- if (!cmd.hasOption(Shell.USER)) {
- throw new OzoneRestClientException(
- "User name is needed in listVolume call.");
- }
-
if (cmd.hasOption(Shell.USER)) {
userName = cmd.getOptionValue(Shell.USER);
} else {
@@ -80,7 +101,8 @@ public class ListVolumeHandler extends Handler {
client.setUserAuth(userName);
}
- List<OzoneVolume> volumes = client.listVolumes(userName);
+ List<OzoneVolume> volumes = client.listVolumes(userName, prefix, maxKeys,
+ startVolume);
if (volumes != null) {
if (cmd.hasOption(Shell.VERBOSE)) {
System.out.printf("Found : %d volumes for user : %s %n", volumes.size(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9ba833/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index 7dc8381..6c80514 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -20,11 +20,13 @@ package org.apache.hadoop.ozone.web.client;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
@@ -45,6 +47,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -80,7 +83,7 @@ public class TestVolume {
Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
cluster = new MiniOzoneCluster.Builder(conf)
- .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build();
+ .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
DataNode dataNode = cluster.getDataNodes().get(0);
final int port = dataNode.getInfoPort();
@@ -123,8 +126,9 @@ public class TestVolume {
client.createVolume("testvol", "bilbo", "100TB");
assertFalse(true);
} catch (OzoneException ex) {
- // OZone will throw saying volume already exists
- assertEquals(ex.getShortMessage(),"volumeAlreadyExists");
+ // Ozone will throw saying volume already exists
+ GenericTestUtils.assertExceptionContains(
+ Status.VOLUME_ALREADY_EXISTS.toString(), ex);
}
}
@@ -224,6 +228,58 @@ public class TestVolume {
Assert.assertEquals(volCount / step , pagecount);
}
+ @Test
+ public void testListVolumes() throws OzoneException, IOException {
+ final int volCount = 20;
+ final String user1 = "test-user-a";
+ final String user2 = "test-user-b";
+
+ client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
+ // Create 20 volumes, 10 for user1 and another 10 for user2.
+ for (int x = 0; x < volCount; x++) {
+ String volumeName;
+ String userName;
+
+ if (x % 2 == 0) {
+ // create volume [test-vol0, test-vol2, ..., test-vol18] for user1
+ userName = user1;
+ volumeName = "test-vol" + x;
+ } else {
+ // create volume [test-vol1, test-vol3, ..., test-vol19] for user2
+ userName = user2;
+ volumeName = "test-vol" + x;
+ }
+ OzoneVolume vol = client.createVolume(volumeName, userName, "100TB");
+ assertNotNull(vol);
+ }
+
+ // list all the volumes belonging to user1
+ List<OzoneVolume> volumeList = client.listVolumes(user1,
+ null, 100, StringUtils.EMPTY);
+ assertEquals(10, volumeList.size());
+ List<OzoneVolume> filtered = volumeList.stream()
+ .filter(item -> item.getOwnerName().equals(user1))
+ .collect(Collectors.toList());
+ assertEquals(10, filtered.size());
+
+ // test max key parameter of listing volumes
+ volumeList = client.listVolumes(user1, null, 2, StringUtils.EMPTY);
+ assertEquals(2, volumeList.size());
+
+ // test prefix parameter of listing volumes
+ volumeList = client.listVolumes(user1, "test-vol10", 100,
+ StringUtils.EMPTY);
+ assertTrue(volumeList.size() == 1
+ && volumeList.get(0).getVolumeName().equals("test-vol10"));
+
+ volumeList = client.listVolumes(user1, "test-vol1",
+ 100, StringUtils.EMPTY);
+ assertEquals(5, volumeList.size());
+
+ // test start key parameter of listing volumes
+ volumeList = client.listVolumes(user2, null, 100, "test-vol17");
+ assertEquals(2, volumeList.size());
+ }
+
/**
* Returns a list of mocked {@link CloseableHttpClient} used for testing.
* The mocked client replaces the actual calls in
---------------------------------------------------------------------
[14/50] [abbrv] hadoop git commit: YARN-6759. Fix
TestRMRestart.testRMRestartWaitForPreviousAMToFinish failure. Contributed by
Naganarasimha G R
Posted by xy...@apache.org.
YARN-6759. Fix TestRMRestart.testRMRestartWaitForPreviousAMToFinish failure. Contributed by Naganarasimha G R
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75c0220b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75c0220b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75c0220b
Branch: refs/heads/HDFS-7240
Commit: 75c0220b4494dd4424a0c531e0bf0a763748dc62
Parents: 4a574e9
Author: bibinchundatt <bi...@apache.org>
Authored: Fri Jul 14 13:53:39 2017 +0530
Committer: bibinchundatt <bi...@apache.org>
Committed: Fri Jul 14 13:53:39 2017 +0530
----------------------------------------------------------------------
.../apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/75c0220b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 139e2da..955b4b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -105,9 +105,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
@@ -622,7 +622,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
return new Boolean(rmAppForCheck.getAppAttempts().size() == 4);
}
},
- 100, maxRetry);
+ 100, maxRetry * 100);
Assert.assertEquals(RMAppAttemptState.FAILED,
rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
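The fix turns on the waitFor signature: GenericTestUtils.waitFor(check,
checkEveryMillis, waitForMillis) polls the supplier every checkEveryMillis
until it returns true or waitForMillis elapses. Passing maxRetry directly as
the timeout gave the test only maxRetry milliseconds in total; multiplying by
the 100 ms interval restores a budget of roughly maxRetry polls.
Schematically (condition inlined as a lambda for illustration):

    GenericTestUtils.waitFor(
        () -> rmAppForCheck.getAppAttempts().size() == 4,  // condition to poll
        100,               // re-check every 100 ms
        maxRetry * 100);   // give up after ~maxRetry checks' worth of time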
---------------------------------------------------------------------
[28/50] [abbrv] hadoop git commit: HDFS-12138. Remove redundant
'public' modifiers from BlockCollection. Contributed by Chen Liang
Posted by xy...@apache.org.
HDFS-12138. Remove redundant 'public' modifiers from BlockCollection. Contributed by Chen Liang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed27f2b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed27f2b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed27f2b2
Branch: refs/heads/HDFS-7240
Commit: ed27f2b2cc6093865367b98a100dcd42b2c6b89d
Parents: b0e78ae
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Mon Jul 17 13:54:16 2017 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Mon Jul 17 13:54:16 2017 -0700
----------------------------------------------------------------------
.../server/blockmanagement/BlockCollection.java | 22 ++++++++++----------
1 file changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed27f2b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index b880590..c0dfc14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -32,62 +32,62 @@ public interface BlockCollection {
/**
* Get the last block of the collection.
*/
- public BlockInfo getLastBlock();
+ BlockInfo getLastBlock();
/**
* Get content summary.
*/
- public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+ ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
throws AccessControlException;
/**
* @return the number of blocks or block groups
*/
- public int numBlocks();
+ int numBlocks();
/**
* Get the blocks (striped or contiguous).
*/
- public BlockInfo[] getBlocks();
+ BlockInfo[] getBlocks();
/**
* Get preferred block size for the collection
* @return preferred block size in bytes
*/
- public long getPreferredBlockSize();
+ long getPreferredBlockSize();
/**
* Get block replication for the collection.
* @return block replication value. Return 0 if the file is erasure coded.
*/
- public short getPreferredBlockReplication();
+ short getPreferredBlockReplication();
/**
* @return the storage policy ID.
*/
- public byte getStoragePolicyID();
+ byte getStoragePolicyID();
/**
* Get the name of the collection.
*/
- public String getName();
+ String getName();
/**
* Set the block (contiguous or striped) at the given index.
*/
- public void setBlock(int index, BlockInfo blk);
+ void setBlock(int index, BlockInfo blk);
/**
* Convert the last block of the collection to an under-construction block
* and set the locations.
*/
- public void convertLastBlockToUC(BlockInfo lastBlock,
+ void convertLastBlockToUC(BlockInfo lastBlock,
DatanodeStorageInfo[] targets) throws IOException;
/**
* @return whether the block collection is under construction.
*/
- public boolean isUnderConstruction();
+ boolean isUnderConstruction();
/**
* @return whether the block collection is in striping format
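For context, every member of a Java interface is implicitly public (JLS 9.4),
so removing the modifiers changes nothing about visibility. A self-contained
illustration, not taken from the patch:

    interface Example {
      public int size();   // the "public" here is redundant...
      int count();         // ...this declaration has exactly the same visibility
    }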
---------------------------------------------------------------------
[48/50] [abbrv] hadoop git commit: HADOOP-14666. Tests use
assertTrue(....equals(...)) instead of assertEquals()
Posted by xy...@apache.org.
HADOOP-14666. Tests use assertTrue(....equals(...)) instead of assertEquals()
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c21c2603
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c21c2603
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c21c2603
Branch: refs/heads/HDFS-7240
Commit: c21c26039238f633a0d2df9670f636d026c35649
Parents: 077fcf6
Author: Daniel Templeton <te...@apache.org>
Authored: Wed Jul 19 13:58:55 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Wed Jul 19 13:58:55 2017 -0700
----------------------------------------------------------------------
.../authentication/util/TestCertificateUtil.java | 6 ++++--
.../java/org/apache/hadoop/conf/TestDeprecatedKeys.java | 2 +-
.../apache/hadoop/crypto/key/TestKeyProviderFactory.java | 11 +++++++----
.../src/test/java/org/apache/hadoop/fs/TestHardLink.java | 2 +-
.../security/alias/TestCredentialProviderFactory.java | 10 +++++-----
.../hadoop/security/authorize/TestAccessControlList.java | 8 ++++----
.../apache/hadoop/util/TestReadWriteDiskValidator.java | 5 +++--
7 files changed, 25 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
index ce4176c..5794eb6 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.security.authentication.util;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -86,8 +88,8 @@ public class TestCertificateUtil {
+ "Mzc1xA==";
try {
RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
- assertTrue(pk != null);
- assertTrue(pk.getAlgorithm().equals("RSA"));
+ assertNotNull(pk);
+ assertEquals("RSA", pk.getAlgorithm());
} catch (ServletException se) {
fail("Should not have thrown ServletException");
}
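The motivation for the whole patch is diagnosability: on failure,
assertEquals reports both values, while assertTrue over equals() only reports
that the condition was false. An illustration with an invented failing value
("EC" is hypothetical):

    // assertTrue(pk.getAlgorithm().equals("RSA"))
    //   fails with just: java.lang.AssertionError         (no values shown)
    // assertEquals("RSA", pk.getAlgorithm())
    //   fails with a message like: expected:<RSA> but was:<EC>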
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
index 3036d0c..167daa5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
@@ -35,7 +35,7 @@ public class TestDeprecatedKeys extends TestCase {
conf.set("topology.script.file.name", "xyz");
conf.set("topology.script.file.name", "xyz");
String scriptFile = conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
- assertTrue(scriptFile.equals("xyz")) ;
+ assertEquals("xyz", scriptFile) ;
}
//Tests reading / writing a conf file with deprecation after setting
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
index 53785bc..db30eb0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
@@ -189,8 +189,10 @@ public class TestKeyProviderFactory {
assertTrue("Returned Keys should have included key4.", keys.contains("key4"));
List<KeyVersion> kvl = provider.getKeyVersions("key3");
- assertTrue("KeyVersions should have been returned for key3.", kvl.size() == 1);
- assertTrue("KeyVersions should have included key3@0.", kvl.get(0).getVersionName().equals("key3@0"));
+ assertEquals("KeyVersions should have been returned for key3.",
+ 1, kvl.size());
+ assertEquals("KeyVersions should have included key3@0.",
+ "key3@0", kvl.get(0).getVersionName());
assertArrayEquals(key3, kvl.get(0).getMaterial());
}
@@ -267,7 +269,7 @@ public class TestKeyProviderFactory {
Path path = ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
- assertTrue(s.getPermission().toString().equals("rw-------"));
+ assertEquals("rw-------", s.getPermission().toString());
assertTrue(file + " should exist", file.isFile());
// Corrupt file and Check if JKS can reload from _OLD file
@@ -371,7 +373,8 @@ public class TestKeyProviderFactory {
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
- assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
+ assertEquals("Permissions should have been retained from the preexisting "
+ + "keystore.", "rwxrwxrwx", s.getPermission().toString());
}
@Test
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
index b32b95e..b08e15c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
@@ -320,7 +320,7 @@ public class TestHardLink {
assertEquals(2, ("%f").length());
//make sure "\\%f" was munged correctly
assertEquals(3, ("\\%f").length());
- assertTrue(win.getLinkCountCommand[1].equals("hardlink"));
+ assertEquals("hardlink", win.getLinkCountCommand[1]);
//make sure "-c%h" was not munged
assertEquals(4, ("-c%h").length());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
index 6fa5992..ee7e42c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
@@ -214,7 +214,7 @@ public class TestCredentialProviderFactory {
Path path = ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
- assertTrue(s.getPermission().toString().equals("rw-------"));
+ assertEquals("rw-------", s.getPermission().toString());
assertTrue(file + " should exist", file.isFile());
// check permission retention after explicit change
@@ -236,8 +236,8 @@ public class TestCredentialProviderFactory {
Path path = ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
- assertTrue("Unexpected permissions: " + s.getPermission().toString(),
- s.getPermission().toString().equals("rw-------"));
+ assertEquals("Unexpected permissions: " + s.getPermission().toString(),
+ "rw-------", s.getPermission().toString());
assertTrue(file + " should exist", file.isFile());
// check permission retention after explicit change
@@ -267,8 +267,8 @@ public class TestCredentialProviderFactory {
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
- assertTrue("Permissions should have been retained from the preexisting " +
- "keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
+ assertEquals("Permissions should have been retained from the preexisting " +
+ "keystore.", "rwxrwxrwx", s.getPermission().toString());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
index 0868381..7039001 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
@@ -187,18 +187,18 @@ public class TestAccessControlList {
AccessControlList acl;
acl = new AccessControlList("*");
- assertTrue(acl.toString().equals("All users are allowed"));
+ assertEquals("All users are allowed", acl.toString());
validateGetAclString(acl);
acl = new AccessControlList(" ");
- assertTrue(acl.toString().equals("No users are allowed"));
+ assertEquals("No users are allowed", acl.toString());
acl = new AccessControlList("user1,user2");
- assertTrue(acl.toString().equals("Users [user1, user2] are allowed"));
+ assertEquals("Users [user1, user2] are allowed", acl.toString());
validateGetAclString(acl);
acl = new AccessControlList("user1,user2 ");// with space
- assertTrue(acl.toString().equals("Users [user1, user2] are allowed"));
+ assertEquals("Users [user1, user2] are allowed", acl.toString());
validateGetAclString(acl);
acl = new AccessControlList(" group1,group2");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReadWriteDiskValidator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReadWriteDiskValidator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReadWriteDiskValidator.java
index 46f4033..b50a73f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReadWriteDiskValidator.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReadWriteDiskValidator.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.util;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -125,7 +126,7 @@ public class TestReadWriteDiskValidator {
readWriteDiskValidator.checkStatus(testDir);
fail("Disk check should fail.");
} catch (DiskErrorException e) {
- assertTrue(e.getMessage().equals("Disk Check failed!"));
+ assertEquals("Disk Check failed!", e.getMessage());
}
MetricsSource source = ms.getSource(
@@ -137,7 +138,7 @@ public class TestReadWriteDiskValidator {
readWriteDiskValidator.checkStatus(testDir);
fail("Disk check should fail.");
} catch (DiskErrorException e) {
- assertTrue(e.getMessage().equals("Disk Check failed!"));
+ assertEquals("Disk Check failed!", e.getMessage());
}
source.getMetrics(collector, true);
---------------------------------------------------------------------
[13/50] [abbrv] hadoop git commit: YARN-6769. Make schedulables
without demand less needy in FairSharePolicy#compare. (Yunfan Zhou via Yufei
Gu)
Posted by xy...@apache.org.
YARN-6769. Make schedulables without demand less needy in FairSharePolicy#compare. (Yunfan Zhou via Yufei Gu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a574e9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a574e9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a574e9a
Branch: refs/heads/HDFS-7240
Commit: 4a574e9a84f2e997038452b22f2ad2a2d42e8ac8
Parents: 228ddaa
Author: Yufei Gu <yu...@apache.org>
Authored: Thu Jul 13 23:10:10 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Thu Jul 13 23:10:10 2017 -0700
----------------------------------------------------------------------
.../scheduler/fair/policies/FairSharePolicy.java | 17 +++++++++++++++--
.../scheduler/fair/TestSchedulingPolicy.java | 19 ++++++++++++-------
2 files changed, 27 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a574e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index c3ec47a..2a852aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -58,6 +58,9 @@ public class FairSharePolicy extends SchedulingPolicy {
/**
* Compare Schedulables via weighted fair sharing. In addition, Schedulables
* below their min share get priority over those whose min share is met.
+ *
+ * Schedulables without resource demand get lower priority than
+ * ones that have demand.
*
* Schedulables below their min share are compared by how far below it they
* are as a ratio. For example, if job A has 8 out of a min share of 10 tasks
@@ -79,6 +82,16 @@ public class FairSharePolicy extends SchedulingPolicy {
@Override
public int compare(Schedulable s1, Schedulable s2) {
+ Resource demand1 = s1.getDemand();
+ Resource demand2 = s2.getDemand();
+ if (demand1.equals(Resources.none()) && Resources.greaterThan(
+ RESOURCE_CALCULATOR, null, demand2, Resources.none())) {
+ return 1;
+ } else if (demand2.equals(Resources.none()) && Resources.greaterThan(
+ RESOURCE_CALCULATOR, null, demand1, Resources.none())) {
+ return -1;
+ }
+
double minShareRatio1, minShareRatio2;
double useToWeightRatio1, useToWeightRatio2;
double weight1, weight2;
@@ -86,9 +99,9 @@ public class FairSharePolicy extends SchedulingPolicy {
Resource resourceUsage1 = s1.getResourceUsage();
Resource resourceUsage2 = s2.getResourceUsage();
Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
- s1.getMinShare(), s1.getDemand());
+ s1.getMinShare(), demand1);
Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
- s2.getMinShare(), s2.getDemand());
+ s2.getMinShare(), demand2);
boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
resourceUsage1, minShare1);
boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
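The effect of the new guard clauses: a schedulable with zero demand always
sorts after one that is asking for resources, before min share or weights are
even considered. A sketch of the resulting comparator contract (the variable
setup is hypothetical):

    // Given two schedulables that differ only in demand:
    //   idle.getDemand() == Resources.none()           // <memory: 0, vCores: 0>
    //   busy.getDemand() == Resource.newInstance(4, 1)
    // the comparator now orders busy ahead of idle:
    //   compare(idle, busy) > 0   // idle is "less needy", goes last
    //   compare(busy, idle) < 0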
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a574e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
index d84f0cf..3a16454 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
@@ -123,6 +123,8 @@ public class TestSchedulingPolicy {
private Resource minShare = Resource.newInstance(0, 1);
private Resource demand = Resource.newInstance(4, 1);
+ private Resource[] demandCollection = {
+ Resource.newInstance(0, 0), Resource.newInstance(4, 1) };
private String[] nameCollection = {"A", "B", "C"};
@@ -160,9 +162,11 @@ public class TestSchedulingPolicy {
for (int j = 0; j < startTimeColloection.length; j++) {
for (int k = 0; k < usageCollection.length; k++) {
for (int t = 0; t < weightsCollection.length; t++) {
- genSchedulable.push(createSchedulable(i, j, k, t));
- generateAndTest(genSchedulable);
- genSchedulable.pop();
+ for (int m = 0; m < demandCollection.length; m++) {
+ genSchedulable.push(createSchedulable(m, i, j, k, t));
+ generateAndTest(genSchedulable);
+ genSchedulable.pop();
+ }
}
}
}
@@ -171,10 +175,11 @@ public class TestSchedulingPolicy {
}
private Schedulable createSchedulable(
- int nameIdx, int startTimeIdx, int usageIdx, int weightsIdx) {
- return new MockSchedulable(minShare, demand, nameCollection[nameIdx],
- startTimeColloection[startTimeIdx], usageCollection[usageIdx],
- weightsCollection[weightsIdx]);
+ int demandId, int nameIdx, int startTimeIdx,
+ int usageIdx, int weightsIdx) {
+ return new MockSchedulable(minShare, demandCollection[demandId],
+ nameCollection[nameIdx], startTimeColloection[startTimeIdx],
+ usageCollection[usageIdx], weightsCollection[weightsIdx]);
}
private boolean checkTransitivity(
---------------------------------------------------------------------
[24/50] [abbrv] hadoop git commit: HADOOP-14662. Update azure-storage
sdk to version 5.4.0. Contributed by Thomas Marquardt.
Posted by xy...@apache.org.
HADOOP-14662. Update azure-storage sdk to version 5.4.0.
Contributed by Thomas Marquardt.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06ece483
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06ece483
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06ece483
Branch: refs/heads/HDFS-7240
Commit: 06ece483222b82404ee198159c6866db89043459
Parents: 0a6d5c0
Author: Steve Loughran <st...@apache.org>
Authored: Sat Jul 15 16:27:17 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Sat Jul 15 16:27:17 2017 +0100
----------------------------------------------------------------------
hadoop-project/pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06ece483/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3969474..b9819b4 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1118,7 +1118,7 @@
<dependency>
<groupId>com.microsoft.azure</groupId>
<artifactId>azure-storage</artifactId>
- <version>5.3.0</version>
+ <version>5.4.0</version>
</dependency>
<dependency>
---------------------------------------------------------------------
[08/50] [abbrv] hadoop git commit: YARN-6805. NPE in
LinuxContainerExecutor due to null PrivilegedOperationException exit code.
Contributed by Jason Lowe
Posted by xy...@apache.org.
YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f76f5c09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f76f5c09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f76f5c09
Branch: refs/heads/HDFS-7240
Commit: f76f5c0919cdb0b032edb309d137093952e77268
Parents: 5f1ee72
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Thu Jul 13 17:38:17 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Thu Jul 13 17:38:17 2017 -0500
----------------------------------------------------------------------
.../nodemanager/LinuxContainerExecutor.java | 19 +++--
.../PrivilegedOperationException.java | 10 +--
.../runtime/ContainerExecutionException.java | 10 +--
.../TestLinuxContainerExecutorWithMocks.java | 89 ++++++++++++++++++++
4 files changed, 111 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76f5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 9a3b2d2..47b99c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,6 +275,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
}
+ protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+ return PrivilegedOperationExecutor.getInstance(getConf());
+ }
+
@Override
public void init() throws IOException {
Configuration conf = super.getConf();
@@ -285,7 +289,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
PrivilegedOperation checkSetupOp = new PrivilegedOperation(
PrivilegedOperation.OperationType.CHECK_SETUP);
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
false);
@@ -382,7 +386,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
Configuration conf = super.getConf();
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
initializeContainerOp, null, null, false, true);
@@ -530,8 +534,9 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
builder.append("Stack trace: "
+ StringUtils.stringifyException(e) + "\n");
- if (!e.getOutput().isEmpty()) {
- builder.append("Shell output: " + e.getOutput() + "\n");
+ String output = e.getOutput();
+ if (output != null && !output.isEmpty()) {
+ builder.append("Shell output: " + output + "\n");
}
String diagnostics = builder.toString();
logOutput(diagnostics);
@@ -729,7 +734,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
Configuration conf = super.getConf();
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
false);
@@ -759,7 +764,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
PrivilegedOperationExecutor privOpExecutor =
- PrivilegedOperationExecutor.getInstance(super.getConf());
+ getPrivilegedOperationExecutor();
String results =
privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -818,7 +823,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
mountCGroupsOp.appendArgs(cgroupKVs);
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(mountCGroupsOp,
false);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76f5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
index 3622489..9a11194 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
public class PrivilegedOperationException extends YarnException {
private static final long serialVersionUID = 1L;
- private Integer exitCode;
+ private int exitCode = -1;
private String output;
private String errorOutput;
@@ -36,7 +36,7 @@ public class PrivilegedOperationException extends YarnException {
super(message);
}
- public PrivilegedOperationException(String message, Integer exitCode,
+ public PrivilegedOperationException(String message, int exitCode,
String output, String errorOutput) {
super(message);
this.exitCode = exitCode;
@@ -48,8 +48,8 @@ public class PrivilegedOperationException extends YarnException {
super(cause);
}
- public PrivilegedOperationException(Throwable cause, Integer exitCode, String
- output, String errorOutput) {
+ public PrivilegedOperationException(Throwable cause, int exitCode,
+ String output, String errorOutput) {
super(cause);
this.exitCode = exitCode;
this.output = output;
@@ -59,7 +59,7 @@ public class PrivilegedOperationException extends YarnException {
super(message, cause);
}
- public Integer getExitCode() {
+ public int getExitCode() {
return exitCode;
}
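The root cause this patch removes: the exit code was a boxed Integer that
could remain null, and auto-unboxing a null Integer throws
NullPointerException. A minimal reproduction of the hazard (illustrative, not
from the patch):

    Integer exitCode = null;   // no exit code was ever recorded
    int code = exitCode;       // throws NullPointerException via auto-unboxing
    // With a plain int field defaulting to -1, getExitCode() can never
    // trigger this, and callers can build error messages safely.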
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76f5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
index 1fbece2..3147277 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
@@ -32,10 +32,10 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
@InterfaceStability.Unstable
public class ContainerExecutionException extends YarnException {
private static final long serialVersionUID = 1L;
- private static final Integer EXIT_CODE_UNSET = -1;
+ private static final int EXIT_CODE_UNSET = -1;
private static final String OUTPUT_UNSET = "<unknown>";
- private Integer exitCode;
+ private int exitCode;
private String output;
private String errorOutput;
@@ -54,7 +54,7 @@ public class ContainerExecutionException extends YarnException {
}
- public ContainerExecutionException(String message, Integer exitCode, String
+ public ContainerExecutionException(String message, int exitCode, String
output, String errorOutput) {
super(message);
this.exitCode = exitCode;
@@ -62,7 +62,7 @@ public class ContainerExecutionException extends YarnException {
this.errorOutput = errorOutput;
}
- public ContainerExecutionException(Throwable cause, Integer exitCode, String
+ public ContainerExecutionException(Throwable cause, int exitCode, String
output, String errorOutput) {
super(cause);
this.exitCode = exitCode;
@@ -70,7 +70,7 @@ public class ContainerExecutionException extends YarnException {
this.errorOutput = errorOutput;
}
- public Integer getExitCode() {
+ public int getExitCode() {
return exitCode;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76f5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index 07134e8..cfd0e36 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -23,7 +23,9 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
@@ -40,6 +42,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -47,6 +50,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -54,6 +59,7 @@ import org.apache.hadoop.yarn.exceptions.ConfigurationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
@@ -516,4 +522,87 @@ public class TestLinuxContainerExecutorWithMocks {
appSubmitter, cmd, "", baseDir0.toString(), baseDir1.toString()),
readMockParams());
}
+
+ @Test
+ public void testNoExitCodeFromPrivilegedOperation() throws Exception {
+ Configuration conf = new Configuration();
+ final PrivilegedOperationExecutor spyPrivilegedExecutor =
+ spy(PrivilegedOperationExecutor.getInstance(conf));
+ doThrow(new PrivilegedOperationException("interrupted"))
+ .when(spyPrivilegedExecutor).executePrivilegedOperation(
+ any(List.class), any(PrivilegedOperation.class),
+ any(File.class), any(Map.class), anyBoolean(), anyBoolean());
+ LinuxContainerRuntime runtime = new DefaultLinuxContainerRuntime(
+ spyPrivilegedExecutor);
+ runtime.initialize(conf);
+ mockExec = new LinuxContainerExecutor(runtime);
+ mockExec.setConf(conf);
+ LinuxContainerExecutor lce = new LinuxContainerExecutor(runtime) {
+ @Override
+ protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+ return spyPrivilegedExecutor;
+ }
+ };
+ lce.setConf(conf);
+ InetSocketAddress address = InetSocketAddress.createUnresolved(
+ "localhost", 8040);
+ Path nmPrivateCTokensPath = new Path("file:///bin/nmPrivateCTokensPath");
+ LocalDirsHandlerService dirService = new LocalDirsHandlerService();
+ dirService.init(conf);
+
+ String appSubmitter = "nobody";
+ ApplicationId appId = ApplicationId.newInstance(1, 1);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+ ContainerId cid = ContainerId.newContainerId(attemptId, 1);
+ HashMap<String, String> env = new HashMap<>();
+ Container container = mock(Container.class);
+ ContainerLaunchContext context = mock(ContainerLaunchContext.class);
+ when(container.getContainerId()).thenReturn(cid);
+ when(container.getLaunchContext()).thenReturn(context);
+ when(context.getEnvironment()).thenReturn(env);
+ Path workDir = new Path("/tmp");
+
+ try {
+ lce.startLocalizer(new LocalizerStartContext.Builder()
+ .setNmPrivateContainerTokens(nmPrivateCTokensPath)
+ .setNmAddr(address)
+ .setUser(appSubmitter)
+ .setAppId(appId.toString())
+ .setLocId("12345")
+ .setDirsHandler(dirService)
+ .build());
+ Assert.fail("startLocalizer should have thrown an exception");
+ } catch (IOException e) {
+ assertTrue("Unexpected exception " + e,
+ e.getMessage().contains("exitCode"));
+ }
+
+ lce.activateContainer(cid, new Path(workDir, "pid.txt"));
+ lce.launchContainer(new ContainerStartContext.Builder()
+ .setContainer(container)
+ .setNmPrivateContainerScriptPath(new Path("file:///bin/echo"))
+ .setNmPrivateTokensPath(new Path("file:///dev/null"))
+ .setUser(appSubmitter)
+ .setAppId(appId.toString())
+ .setContainerWorkDir(workDir)
+ .setLocalDirs(dirsHandler.getLocalDirs())
+ .setLogDirs(dirsHandler.getLogDirs())
+ .setFilecacheDirs(new ArrayList<>())
+ .setUserLocalDirs(new ArrayList<>())
+ .setContainerLocalDirs(new ArrayList<>())
+ .setContainerLogDirs(new ArrayList<>())
+ .build());
+ lce.deleteAsUser(new DeletionAsUserContext.Builder()
+ .setUser(appSubmitter)
+ .setSubDir(new Path("/tmp/testdir"))
+ .build());
+
+ try {
+ lce.mountCgroups(new ArrayList<String>(), "hierarchy");
+ Assert.fail("mountCgroups should have thrown an exception");
+ } catch (IOException e) {
+ assertTrue("Unexpected exception " + e,
+ e.getMessage().contains("exit code"));
+ }
+ }
}
[37/50] [abbrv] hadoop git commit: YARN-6798. Fix NM startup failure
with old state store due to version mismatch. (Botong Huang via rchiang)
Posted by xy...@apache.org.
YARN-6798. Fix NM startup failure with old state store due to version mismatch. (Botong Huang via rchiang)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5f14a2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5f14a2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5f14a2a
Branch: refs/heads/HDFS-7240
Commit: f5f14a2ad67f91064a73685b44369c6314f0e1cd
Parents: 0b7afc0
Author: Ray Chiang <rc...@apache.org>
Authored: Tue Jul 18 12:35:08 2017 -0700
Committer: Ray Chiang <rc...@apache.org>
Committed: Tue Jul 18 12:35:08 2017 -0700
----------------------------------------------------------------------
.../nodemanager/recovery/NMLeveldbStateStoreService.java | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5f14a2a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index a0502df..c556b39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -85,7 +85,11 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
private static final String DB_NAME = "yarn-nm-state";
private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version";
- private static final Version CURRENT_VERSION_INFO = Version.newInstance(3, 0);
+ /**
+ * Changes from 1.0 to 1.1: Save AMRMProxy state in NMSS.
+ * Changes from 1.1 to 1.2: Save queued container information.
+ */
+ private static final Version CURRENT_VERSION_INFO = Version.newInstance(1, 2);
private static final String DELETION_TASK_KEY_PREFIX =
"DeletionService/deltask_";
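The one-line version constant is the entire fix: the NM state store compares the schema version recorded in LevelDB against CURRENT_VERSION_INFO at startup, so the accidental jump to 3.0 made every existing store look incompatible and aborted NM recovery. A minimal sketch of the usual major/minor compatibility rule such state stores apply, with loadVersion/storeVersion/getCurrentVersion as illustrative helper names rather than a verbatim excerpt:

    // Same major version: compatible, newer minor versions only add data.
    // Different major version: refuse to start rather than misread state.
    private void checkVersion() throws IOException {
      Version loaded = loadVersion();            // version recorded in LevelDB
      if (loaded.equals(getCurrentVersion())) {
        return;                                  // exact match, nothing to do
      }
      if (loaded.isCompatibleTo(getCurrentVersion())) {
        storeVersion();                          // compatible: stamp the new version
      } else {
        throw new IOException("Incompatible version for NM state store: expecting "
            + getCurrentVersion() + " but loaded " + loaded);
      }
    }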
[41/50] [abbrv] hadoop git commit: HADOOP-14642. wasb: add support
for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.
Posted by xy...@apache.org.
HADOOP-14642. wasb: add support for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2843c688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2843c688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2843c688
Branch: refs/heads/HDFS-7240
Commit: 2843c688bcc21c65eb3538ffb3caeaffe440eda8
Parents: 845c4e5
Author: Jitendra Pandey <ji...@apache.org>
Authored: Wed Jul 19 00:13:06 2017 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Wed Jul 19 00:13:06 2017 -0700
----------------------------------------------------------------------
.../src/main/resources/core-default.xml | 9 +-
.../conf/TestCommonConfigurationFields.java | 1 +
.../hadoop/fs/azure/CachingAuthorizer.java | 232 +++++++++++++++++++
.../fs/azure/LocalSASKeyGeneratorImpl.java | 28 ++-
.../hadoop/fs/azure/NativeAzureFileSystem.java | 3 -
.../fs/azure/RemoteSASKeyGeneratorImpl.java | 46 +++-
.../fs/azure/RemoteWasbAuthorizerImpl.java | 38 ++-
.../hadoop/fs/azure/SASKeyGeneratorImpl.java | 4 +-
.../hadoop-azure/src/site/markdown/index.md | 38 +++
.../hadoop/fs/azure/AbstractWasbTestBase.java | 5 +
.../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 22 +-
.../TestNativeAzureFSAuthorizationCaching.java | 60 +++++
.../TestNativeAzureFileSystemAuthorization.java | 86 ++-----
...veAzureFileSystemAuthorizationWithOwner.java | 2 +-
.../fs/azure/TestWasbRemoteCallHelper.java | 6 +-
.../src/test/resources/azure-test.xml | 3 +-
16 files changed, 499 insertions(+), 84 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index a705a4e..68b0a9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1343,7 +1343,14 @@
configuration
</description>
</property>
-
+<property>
+ <name>fs.azure.authorization.caching.enable</name>
+ <value>true</value>
+ <description>
+ Config flag to enable caching of authorization results and saskeys in WASB.
+ This flag is relevant only when fs.azure.authorization is enabled.
+ </description>
+</property>
<property>
<name>io.seqfile.compress.blocksize</name>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 8524973..593254eb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -115,6 +115,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
xmlPropsToSkipCompare.add("fs.azure.secure.mode");
xmlPropsToSkipCompare.add("fs.azure.authorization");
+ xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
// Deprecated properties. These should eventually be removed from the
// class.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
new file mode 100644
index 0000000..016ae74
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+import com.google.common.cache.Cache;
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.cache.CacheBuilder;
+
+/**
+ * Class that provides caching for Authorize and getSasUri calls
+ * @param <K> - The cache key type
+ * @param <V> - The cached value type
+ */
+public class CachingAuthorizer<K, V> {
+
+ public static final Logger LOG = LoggerFactory
+ .getLogger(CachingAuthorizer.class);
+
+ private Cache<K, V> cache;
+ private boolean isEnabled = false;
+ private long cacheEntryExpiryPeriodInMinutes;
+ private String label;
+
+ public static final String KEY_AUTH_SERVICE_CACHING_ENABLE =
+ "fs.azure.authorization.caching.enable";
+
+ public static final boolean KEY_AUTH_SERVICE_CACHING_ENABLE_DEFAULT = false;
+
+ public static final String KEY_AUTH_SERVICE_CACHING_MAX_ENTRIES =
+ "fs.azure.authorization.caching.maxentries";
+
+ public static final int KEY_AUTH_SERVICE_CACHING_MAX_ENTRIES_DEFAULT = 512;
+
+ public CachingAuthorizer(long ttlInMinutes, String label) {
+ cacheEntryExpiryPeriodInMinutes = ttlInMinutes;
+ this.label = label;
+ if (cacheEntryExpiryPeriodInMinutes <= 0) {
+ isEnabled = false;
+ }
+ }
+
+
+ public void init(Configuration conf) {
+
+ isEnabled = conf.getBoolean(KEY_AUTH_SERVICE_CACHING_ENABLE, KEY_AUTH_SERVICE_CACHING_ENABLE_DEFAULT);
+
+ if (isEnabled) {
+ LOG.debug("{} : Initializing CachingAuthorizer instance", label);
+ cache = CacheBuilder.newBuilder()
+ .maximumSize(
+ conf.getInt(
+ KEY_AUTH_SERVICE_CACHING_MAX_ENTRIES,
+ KEY_AUTH_SERVICE_CACHING_MAX_ENTRIES_DEFAULT
+ )
+ )
+ .expireAfterWrite(cacheEntryExpiryPeriodInMinutes, TimeUnit.MINUTES)
+ .build();
+ }
+ }
+
+ /**
+ * @param key - Cache key
+ * @return null on cache-miss, the cached value on cache-hit
+ */
+ public V get(K key) {
+ if (!isEnabled) {
+ return null;
+ }
+
+ V result = cache.getIfPresent(key);
+ if (result == null) {
+ LOG.debug("{}: CACHE MISS: {}", label, key.toString());
+ }
+ else {
+ LOG.debug("{}: CACHE HIT: {}, {}", label, key.toString(), result.toString());
+ }
+ return result;
+ }
+
+ public void put(K key, V value) {
+ if (isEnabled) {
+ LOG.debug("{}: CACHE PUT: {}, {}", label, key.toString(), value.toString());
+ cache.put(key, value);
+ }
+ }
+
+ public void clear() {
+ if (isEnabled) {
+ cache.invalidateAll();
+ }
+ }
+}
+
+/**
+ * POJO representing the cache key for authorization calls
+ */
+class CachedAuthorizerEntry {
+
+ private String path;
+ private String accessType;
+ private String owner;
+
+ CachedAuthorizerEntry(String path, String accessType, String owner) {
+ this.path = path;
+ this.accessType = accessType;
+ this.owner = owner;
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ public String getAccessType() {
+ return accessType;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == this) {
+ return true;
+ }
+
+ if (o == null) {
+ return false;
+ }
+
+ if (!(o instanceof CachedAuthorizerEntry)) {
+ return false;
+ }
+
+ CachedAuthorizerEntry c = (CachedAuthorizerEntry) o;
+ return
+ this.getPath().equals(c.getPath())
+ && this.getAccessType().equals(c.getAccessType())
+ && this.getOwner().equals(c.getOwner());
+ }
+
+ @Override
+ public int hashCode() {
+ return this.toString().hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return path + ":" + accessType + ":" + owner;
+ }
+
+}
+
+
+/**
+ * POJO representing the cache key for sas-key calls
+ */
+class CachedSASKeyEntry {
+
+ private String storageAccount;
+ private String container;
+ private String path;
+
+ CachedSASKeyEntry(String storageAccount, String container, String path) {
+ this.storageAccount = storageAccount;
+ this.container = container;
+ this.path = path;
+ }
+
+ public String getStorageAccount() {
+ return storageAccount;
+ }
+
+ public String getContainer() {
+ return container;
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == this) {
+ return true;
+ }
+
+ if (o == null) {
+ return false;
+ }
+
+ if (!(o instanceof CachedSASKeyEntry)) {
+ return false;
+ }
+
+ CachedSASKeyEntry c = (CachedSASKeyEntry) o;
+ return
+ this.getStorageAccount().equals(c.getStorageAccount())
+ && this.getContainer().equals(c.getContainer())
+ && this.getPath().equals(c.getPath());
+ }
+
+ @Override
+ public int hashCode() {
+ return this.toString().hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return storageAccount + ":" + container + ":" + path;
+ }
+}
\ No newline at end of file
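Together with the callers patched below, the new class gives a read-through pattern: consult the cache, fall back to the expensive remote call on a miss, then populate the cache. A minimal sketch of that pattern as the authorizer applies it, assuming conf is in scope and with authorizeInternal standing in for the remote call (illustrative, not a verbatim excerpt):

    CachingAuthorizer<CachedAuthorizerEntry, Boolean> cache =
        new CachingAuthorizer<>(5L /* TTL in minutes */, "AUTHORIZATION");
    cache.init(conf);  // stays disabled unless fs.azure.authorization.caching.enable=true

    CachedAuthorizerEntry key = new CachedAuthorizerEntry(path, accessType, owner);
    Boolean cached = cache.get(key);      // null means miss, or caching disabled
    if (cached != null) {
      return cached;
    }
    boolean result = authorizeInternal(path, accessType, owner);  // remote call
    cache.put(key, result);               // no-op when caching is disabled
    return result;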
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/LocalSASKeyGeneratorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/LocalSASKeyGeneratorImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/LocalSASKeyGeneratorImpl.java
index e6f1597..0e2fd50 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/LocalSASKeyGeneratorImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/LocalSASKeyGeneratorImpl.java
@@ -58,11 +58,14 @@ public class LocalSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
* Map to cache CloudStorageAccount instances.
*/
private Map<String, CloudStorageAccount> storageAccountMap;
-
+ private CachingAuthorizer<CachedSASKeyEntry, URI> cache;
private static final int HOURS_IN_DAY = 24;
+
public LocalSASKeyGeneratorImpl(Configuration conf) {
super(conf);
storageAccountMap = new HashMap<String, CloudStorageAccount>();
+ cache = new CachingAuthorizer<>(getSasKeyExpiryPeriod(), "SASKEY");
+ cache.init(conf);
}
/**
@@ -74,11 +77,19 @@ public class LocalSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
try {
+ CachedSASKeyEntry cacheKey = new CachedSASKeyEntry(accountName, container, "/");
+ URI cacheResult = cache.get(cacheKey);
+ if (cacheResult != null) {
+ return cacheResult;
+ }
+
CloudStorageAccount account =
getSASKeyBasedStorageAccountInstance(accountName);
CloudBlobClient client = account.createCloudBlobClient();
- return client.getCredentials().transformUri(
+ URI sasKey = client.getCredentials().transformUri(
client.getContainerReference(container).getUri());
+ cache.put(cacheKey, sasKey);
+ return sasKey;
} catch (StorageException stoEx) {
throw new SASKeyGenerationException("Encountered StorageException while"
@@ -146,7 +157,16 @@ public class LocalSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
CloudBlobContainer sc = null;
CloudBlobClient client = null;
+ CachedSASKeyEntry cacheKey = null;
+
try {
+
+ cacheKey = new CachedSASKeyEntry(accountName, container, relativePath);
+ URI cacheResult = cache.get(cacheKey);
+ if (cacheResult != null) {
+ return cacheResult;
+ }
+
CloudStorageAccount account =
getSASKeyBasedStorageAccountInstance(accountName);
client = account.createCloudBlobClient();
@@ -175,7 +195,9 @@ public class LocalSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
}
try {
- return client.getCredentials().transformUri(blob.getUri());
+ URI sasKey = client.getCredentials().transformUri(blob.getUri());
+ cache.put(cacheKey, sasKey);
+ return sasKey;
} catch (StorageException stoEx) {
throw new SASKeyGenerationException("Encountered StorageException while "
+ "generating SAS key for Blob: " + relativePath + " inside "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 76c6083..a7558a3 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2113,9 +2113,6 @@ public class NativeAzureFileSystem extends FileSystem {
// Capture the absolute path and the path to key.
Path absolutePath = makeAbsolute(f);
-
- performAuthCheck(absolutePath, WasbAuthorizationOperations.READ, "getFileStatus", absolutePath);
-
String key = pathToKey(absolutePath);
if (key.length() == 0) { // root always exists
return newDirectory(null, absolutePath);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index a7cedea..473fa54 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import com.fasterxml.jackson.databind.ObjectReader;
import org.apache.hadoop.conf.Configuration;
@@ -106,12 +107,21 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
private static final String
SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
"10,3,100,2";
+ /**
+ * Saskey caching period
+ */
+ private static final String SASKEY_CACHEENTRY_EXPIRY_PERIOD =
+ "fs.azure.saskey.cacheentry.expiry.period";
private WasbRemoteCallHelper remoteCallHelper = null;
private boolean isKerberosSupportEnabled;
private boolean isSpnegoTokenCacheEnabled;
private RetryPolicy retryPolicy;
private String[] commaSeparatedUrls;
+ private CachingAuthorizer<CachedSASKeyEntry, URI> cache;
+
+ private static final int HOURS_IN_DAY = 24;
+ private static final int MINUTES_IN_HOUR = 60;
public RemoteSASKeyGeneratorImpl(Configuration conf) {
super(conf);
@@ -141,6 +151,18 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
} else {
this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
}
+
+ /* Expire the cache entry five minutes before the actual saskey expiry, so that a stale
+ * sas-key entry is never served from the cache only to be rejected as expired on use.
+ */
+ long sasKeyExpiryPeriodInMinutes = getSasKeyExpiryPeriod() * HOURS_IN_DAY * MINUTES_IN_HOUR; // sas-expiry is in days, convert into mins
+ long cacheEntryDurationInMinutes =
+ conf.getTimeDuration(SASKEY_CACHEENTRY_EXPIRY_PERIOD, sasKeyExpiryPeriodInMinutes, TimeUnit.MINUTES);
+ cacheEntryDurationInMinutes = (cacheEntryDurationInMinutes > (sasKeyExpiryPeriodInMinutes - 5))
+ ? (sasKeyExpiryPeriodInMinutes - 5)
+ : cacheEntryDurationInMinutes;
+ this.cache = new CachingAuthorizer<>(cacheEntryDurationInMinutes, "SASKEY");
+ this.cache.init(conf);
LOG.debug("Initialization of RemoteSASKeyGenerator instance successful");
}
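Worked through with the defaults: getSasKeyExpiryPeriod() is in days, so a 90-day SAS key yields 90 * 24 * 60 = 129,600 minutes, and the cache TTL is capped at 129,600 - 5 = 129,595 minutes. A configured fs.azure.saskey.cacheentry.expiry.period above that cap is clamped down to it; values at or below the cap pass through unchanged.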
@@ -149,6 +171,13 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
String container) throws SASKeyGenerationException {
RemoteSASKeyGenerationResponse sasKeyResponse = null;
try {
+ CachedSASKeyEntry cacheKey = new CachedSASKeyEntry(storageAccount, container, "/");
+ URI cacheResult = cache.get(cacheKey);
+ if (cacheResult != null) {
+ return cacheResult;
+ }
+
+ LOG.debug("Generating Container SAS Key: Storage Account {}, Container {}", storageAccount, container);
URIBuilder uriBuilder = new URIBuilder();
uriBuilder.setPath("/" + CONTAINER_SAS_OP);
uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME, storageAccount);
@@ -160,7 +189,9 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
uriBuilder.getQueryParams());
if (sasKeyResponse.getResponseCode() == REMOTE_CALL_SUCCESS_CODE) {
- return new URI(sasKeyResponse.getSasKey());
+ URI sasKey = new URI(sasKeyResponse.getSasKey());
+ cache.put(cacheKey, sasKey);
+ return sasKey;
} else {
throw new SASKeyGenerationException(
"Remote Service encountered error in SAS Key generation : "
@@ -178,6 +209,15 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
String container, String relativePath) throws SASKeyGenerationException {
try {
+ CachedSASKeyEntry cacheKey = new CachedSASKeyEntry(storageAccount, container, relativePath);
+ URI cacheResult = cache.get(cacheKey);
+ if (cacheResult != null) {
+ return cacheResult;
+ }
+
+ LOG.debug("Generating RelativePath SAS Key for relativePath {} inside Container {} inside Storage Account {}",
+ relativePath, container, storageAccount);
+
URIBuilder uriBuilder = new URIBuilder();
uriBuilder.setPath("/" + BLOB_SAS_OP);
uriBuilder.addParameter(STORAGE_ACCOUNT_QUERY_PARAM_NAME, storageAccount);
@@ -190,7 +230,9 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
makeRemoteRequest(commaSeparatedUrls, uriBuilder.getPath(),
uriBuilder.getQueryParams());
if (sasKeyResponse.getResponseCode() == REMOTE_CALL_SUCCESS_CODE) {
- return new URI(sasKeyResponse.getSasKey());
+ URI sasKey = new URI(sasKeyResponse.getSasKey());
+ cache.put(cacheKey, sasKey);
+ return sasKey;
} else {
throw new SASKeyGenerationException(
"Remote Service encountered error in SAS Key generation : "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index cd4e0a3..ea77510 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -33,6 +33,7 @@ import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URIBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.TimeUnit;
import java.io.IOException;
@@ -95,11 +96,18 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
"10,3,100,2";
+ /**
+ * Authorization caching period
+ */
+ private static final String AUTHORIZATION_CACHEENTRY_EXPIRY_PERIOD =
+ "fs.azure.authorization.cacheentry.expiry.period";
+
private WasbRemoteCallHelper remoteCallHelper = null;
private boolean isKerberosSupportEnabled;
private boolean isSpnegoTokenCacheEnabled;
private RetryPolicy retryPolicy;
private String[] commaSeparatedUrls = null;
+ private CachingAuthorizer<CachedAuthorizerEntry, Boolean> cache;
@VisibleForTesting public void updateWasbRemoteCallHelper(
WasbRemoteCallHelper helper) {
@@ -108,7 +116,7 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
@Override
public void init(Configuration conf)
- throws WasbAuthorizationException, IOException {
+ throws IOException {
LOG.debug("Initializing RemoteWasbAuthorizerImpl instance");
this.isKerberosSupportEnabled =
conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
@@ -131,14 +139,38 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
} else {
this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
}
+
+ this.cache = new CachingAuthorizer<>(
+ conf.getTimeDuration(AUTHORIZATION_CACHEENTRY_EXPIRY_PERIOD, 5L, TimeUnit.MINUTES), "AUTHORIZATION"
+ );
+ this.cache.init(conf);
}
@Override
public boolean authorize(String wasbAbsolutePath, String accessType, String resourceOwner)
- throws WasbAuthorizationException, IOException {
+ throws IOException {
+
+ /* Make an exception for the internal -RenamePending files */
+ if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
+ return true;
+ }
+
+ CachedAuthorizerEntry cacheKey = new CachedAuthorizerEntry(wasbAbsolutePath, accessType, resourceOwner);
+ Boolean cacheresult = cache.get(cacheKey);
+ if (cacheresult != null) {
+ return cacheresult;
+ }
+
+ boolean authorizeresult = authorizeInternal(wasbAbsolutePath, accessType, resourceOwner);
+ cache.put(cacheKey, authorizeresult);
+
+ return authorizeresult;
+ }
+
+ private boolean authorizeInternal(String wasbAbsolutePath, String accessType, String resourceOwner)
+ throws IOException {
try {
- /* Make an exception for the internal -RenamePending files */
final URIBuilder uriBuilder = new URIBuilder();
uriBuilder.setPath("/" + CHECK_AUTHORIZATION_OP);
uriBuilder
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorImpl.java
index 4acd6e4..1a8e754 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SASKeyGeneratorImpl.java
@@ -38,7 +38,7 @@ public abstract class SASKeyGeneratorImpl implements SASKeyGeneratorInterface {
/**
* Default value for the SAS key expiry period in days. {@value}
*/
- public static final long DEFAUL_CONTAINER_SAS_KEY_PERIOD = 90;
+ public static final long DEFAULT_CONTAINER_SAS_KEY_PERIOD = 90;
private long sasKeyExpiryPeriod;
@@ -47,7 +47,7 @@ public abstract class SASKeyGeneratorImpl implements SASKeyGeneratorInterface {
public SASKeyGeneratorImpl(Configuration conf) {
this.conf = conf;
this.sasKeyExpiryPeriod = conf.getTimeDuration(
- KEY_SAS_KEY_EXPIRY_PERIOD, DEFAUL_CONTAINER_SAS_KEY_PERIOD,
+ KEY_SAS_KEY_EXPIRY_PERIOD, DEFAULT_CONTAINER_SAS_KEY_PERIOD,
TimeUnit.DAYS);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 74717be..7415e29 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -425,6 +425,44 @@ value takes a comma separated list of user names who are allowed to perform chow
</property>
```
+Caching of both SAS keys and Authorization responses can be enabled using the following setting:
+The cache settings are applicable only when fs.azure.authorization is enabled.
+The cache is maintained at a filesystem object level.
+```
+ <property>
+ <name>fs.azure.authorization.caching.enable</name>
+ <value>true</value>
+ </property>
+```
+
+The maximum number of entries that the cache can hold can be customized using the following setting:
+```
+ <property>
+ <name>fs.azure.authorization.caching.maxentries</name>
+ <value>512</value>
+ </property>
+```
+
+The validity of an authorization cache-entry can be controlled using the following setting:
+Setting the value to zero disables authorization-caching.
+If the key is not specified, a default expiry duration of 5m takes effect.
+```
+ <property>
+ <name>fs.azure.authorization.cacheentry.expiry.period</name>
+ <value>5m</value>
+ </property>
+```
+
+The validity of a SASKey cache-entry can be controlled using the following setting:
+Setting the value to zero disables SASKey-caching.
+If the key is not specified, the default expiry duration specified in the sas-key request takes effect.
+```
+ <property>
+ <name>fs.azure.saskey.cacheentry.expiry.period</name>
+ <value>90d</value>
+ </property>
+```
+
## Testing the hadoop-azure Module
The hadoop-azure module includes a full suite of unit tests. Most of the tests
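Both expiry keys above take Hadoop's time-duration syntax, a number with an optional unit suffix (ns, us, ms, s, m, h, d) that falls back to the caller's unit when no suffix is given. A small standalone sketch of reading them back, assuming a Configuration loaded with the properties documented above (hypothetical example, not part of the patch):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class CacheTtlExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "5m" parses to 5; a bare "300" would be read as 300 minutes here.
        long authTtl = conf.getTimeDuration(
            "fs.azure.authorization.cacheentry.expiry.period", 5L, TimeUnit.MINUTES);
        // "90d" parses to 129600 minutes; zero disables SASKey caching.
        long sasTtl = conf.getTimeDuration(
            "fs.azure.saskey.cacheentry.expiry.period", 0L, TimeUnit.MINUTES);
        System.out.println("auth TTL=" + authTtl + "m, saskey TTL=" + sasTtl + "m");
      }
    }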
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
index 6ae18fe..51867cd 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.azure;
import static org.junit.Assume.assumeNotNull;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.slf4j.Logger;
@@ -60,6 +61,10 @@ public abstract class AbstractWasbTestBase {
}
}
+ public Configuration getConfiguration() {
+ return new Configuration();
+ }
+
protected abstract AzureBlobStorageTestAccount createTestAccount()
throws Exception;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
index 90a6b51..9fbab49 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockWasbAuthorizerImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.azure;
import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.apache.hadoop.security.UserGroupInformation;
@@ -35,6 +36,7 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
private Map<AuthorizationComponent, Boolean> authRules;
private boolean performOwnerMatch;
+ private CachingAuthorizer<CachedAuthorizerEntry, Boolean> cache;
// The full qualified URL to the root directory
private String qualifiedPrefixUrl;
@@ -42,6 +44,7 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
public MockWasbAuthorizerImpl(NativeAzureFileSystem fs) {
qualifiedPrefixUrl = new Path("/").makeQualified(fs.getUri(), fs.getWorkingDirectory())
.toString().replaceAll("/$", "");
+ cache = new CachingAuthorizer<>(TimeUnit.MINUTES.convert(5L, TimeUnit.MINUTES), "AUTHORIZATION");
}
@Override
@@ -54,7 +57,8 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
if currentUserShortName is set to a string that is not empty
*/
public void init(Configuration conf, boolean matchOwner) {
- authRules = new HashMap<AuthorizationComponent, Boolean>();
+ cache.init(conf);
+ authRules = new HashMap<>();
this.performOwnerMatch = matchOwner;
}
@@ -76,6 +80,21 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
return true;
}
+ CachedAuthorizerEntry cacheKey = new CachedAuthorizerEntry(wasbAbsolutePath, accessType, owner);
+ Boolean cacheresult = cache.get(cacheKey);
+ if (cacheresult != null) {
+ return cacheresult;
+ }
+
+ boolean authorizeresult = authorizeInternal(wasbAbsolutePath, accessType, owner);
+ cache.put(cacheKey, authorizeresult);
+
+ return authorizeresult;
+ }
+
+ private boolean authorizeInternal(String wasbAbsolutePath, String accessType, String owner)
+ throws WasbAuthorizationException {
+
String currentUserShortName = "";
if (this.performOwnerMatch) {
try {
@@ -120,6 +139,7 @@ public class MockWasbAuthorizerImpl implements WasbAuthorizerInterface {
public void deleteAllAuthRules() {
authRules.clear();
+ cache.clear();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
new file mode 100644
index 0000000..84558f8
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthorizationCaching.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE;
+
+/**
+ * Test class to hold all WASB authorization caching related tests.
+ */
+public class TestNativeAzureFSAuthorizationCaching
+ extends TestNativeAzureFileSystemAuthorizationWithOwner {
+
+ private static final int DUMMY_TTL_VALUE = 5000;
+
+ @Override
+ public Configuration getConfiguration() {
+ Configuration conf = super.getConfiguration();
+ conf.set(KEY_AUTH_SERVICE_CACHING_ENABLE, "true");
+ return conf;
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ Configuration conf = getConfiguration();
+ return AzureBlobStorageTestAccount.create(conf);
+ }
+
+ /**
+ * Test to verify cache behavior -- assert that PUT overwrites value if present
+ */
+ @Test
+ public void testCachePut() throws Throwable {
+ CachingAuthorizer<String, Integer> cache = new CachingAuthorizer<>(DUMMY_TTL_VALUE, "TEST");
+ cache.init(getConfiguration());
+ cache.put("TEST", 1);
+ cache.put("TEST", 3);
+ int result = cache.get("TEST");
+ ContractTestUtils.assertTrue("Cache returned unexpected result", result == 3);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
index 862fd48..91d6ebb 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java
@@ -49,11 +49,17 @@ public class TestNativeAzureFileSystemAuthorization
protected MockWasbAuthorizerImpl authorizer;
@Override
- protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
- Configuration conf = new Configuration();
+ public Configuration getConfiguration() {
+ Configuration conf = super.getConfiguration();
conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost/");
conf.set(NativeAzureFileSystem.AZURE_CHOWN_USERLIST_PROPERTY_NAME, "user1 , user2");
+ return conf;
+ }
+
+ @Override
+ protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+ Configuration conf = getConfiguration();
return AzureBlobStorageTestAccount.create(conf);
}
@@ -66,7 +72,8 @@ public class TestNativeAzureFileSystemAuthorization
useSecureMode && useAuthorization);
authorizer = new MockWasbAuthorizerImpl(fs);
- authorizer.init(null);
+ authorizer.init(fs.getConf());
+ fs.updateWasbAuthorizer(authorizer);
}
@@ -109,7 +116,6 @@ public class TestNativeAzureFileSystemAuthorization
Path testPath = new Path(parentDir, "test.dat");
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -135,7 +141,6 @@ public class TestNativeAzureFileSystemAuthorization
Path testPath = new Path(parentDir, "test.dat");
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -163,18 +168,14 @@ public class TestNativeAzureFileSystemAuthorization
setExpectedFailureMessage("create", testPath);
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
- boolean initialCreateSucceeded = false;
try {
fs.create(testPath);
ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath);
- initialCreateSucceeded = true;
fs.create(testPath, true);
}
finally {
- ContractTestUtils.assertTrue(initialCreateSucceeded);
fs.delete(testPath, false);
}
}
@@ -191,19 +192,15 @@ public class TestNativeAzureFileSystemAuthorization
Path testPath = new Path(parentDir, "test.dat");
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
fs.updateWasbAuthorizer(authorizer);
- boolean initialCreateSucceeded = false;
try {
fs.create(testPath);
ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath);
- initialCreateSucceeded = true;
fs.create(testPath, true);
}
finally {
- ContractTestUtils.assertTrue(initialCreateSucceeded);
fs.delete(testPath, false);
}
}
@@ -299,8 +296,6 @@ public class TestNativeAzureFileSystemAuthorization
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parentDir */
authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true); /* for rename */
- authorizer.addAuthRule(srcPath.toString(), WasbAuthorizationOperations.READ.toString(), true); /* for exists */
- authorizer.addAuthRule(dstPath.toString(), WasbAuthorizationOperations.READ.toString(), true); /* for exists */
fs.updateWasbAuthorizer(authorizer);
try {
@@ -331,8 +326,6 @@ public class TestNativeAzureFileSystemAuthorization
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dir */
authorizer.addAuthRule(parentDir.toString(), WasbAuthorizationOperations.WRITE.toString(), false);
- authorizer.addAuthRule(srcPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
- authorizer.addAuthRule(dstPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -365,8 +358,6 @@ public class TestNativeAzureFileSystemAuthorization
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dir */
authorizer.addAuthRule(parentSrcDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
authorizer.addAuthRule(parentDstDir.toString(), WasbAuthorizationOperations.WRITE.toString(), false);
- authorizer.addAuthRule(srcPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
- authorizer.addAuthRule(dstPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -396,8 +387,6 @@ public class TestNativeAzureFileSystemAuthorization
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); /* to create parent dirs */
authorizer.addAuthRule(parentSrcDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
authorizer.addAuthRule(parentDstDir.toString(), WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(srcPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
- authorizer.addAuthRule(dstPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -505,7 +494,6 @@ public class TestNativeAzureFileSystemAuthorization
Path testPath = new Path(parentDir, "test.dat");
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
fs.create(testPath);
@@ -530,7 +518,6 @@ public class TestNativeAzureFileSystemAuthorization
setExpectedFailureMessage("delete", testPath);
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
fs.create(testPath);
@@ -548,7 +535,6 @@ public class TestNativeAzureFileSystemAuthorization
/* Restore permissions to force a successful delete */
authorizer.deleteAllAuthRules();
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
fs.delete(testPath, false);
@@ -570,7 +556,6 @@ public class TestNativeAzureFileSystemAuthorization
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true); // for create and delete
authorizer.addAuthRule("/testDeleteIntermediateFolder*",
WasbAuthorizationOperations.WRITE.toString(), true); // for recursive delete
- authorizer.addAuthRule("/*", WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -586,34 +571,13 @@ public class TestNativeAzureFileSystemAuthorization
}
/**
- * Positive test for getFileStatus
+ * Positive test for getFileStatus. No permissions are required to get file status.
* @throws Throwable
*/
@Test
public void testGetFileStatusPositive() throws Throwable {
Path testPath = new Path("/");
-
- authorizer.addAuthRule("/", WasbAuthorizationOperations.READ.toString(), true);
- fs.updateWasbAuthorizer(authorizer);
-
- ContractTestUtils.assertIsDirectory(fs, testPath);
- }
-
- /**
- * Negative test for getFileStatus
- * @throws Throwable
- */
- @Test //(expected=WasbAuthorizationException.class)
- public void testGetFileStatusNegative() throws Throwable {
-
- Path testPath = new Path("/");
-
- setExpectedFailureMessage("getFileStatus", testPath);
-
- authorizer.addAuthRule("/", WasbAuthorizationOperations.READ.toString(), false);
- fs.updateWasbAuthorizer(authorizer);
-
ContractTestUtils.assertIsDirectory(fs, testPath);
}
@@ -627,7 +591,6 @@ public class TestNativeAzureFileSystemAuthorization
Path testPath = new Path("/testMkdirsAccessCheckPositive/1/2/3");
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -652,7 +615,6 @@ public class TestNativeAzureFileSystemAuthorization
setExpectedFailureMessage("mkdirs", testPath);
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), false);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
try {
@@ -686,13 +648,12 @@ public class TestNativeAzureFileSystemAuthorization
*/
@Test
public void testSetOwnerThrowsForUnauthorisedUsers() throws Throwable {
+
expectedEx.expect(WasbAuthorizationException.class);
Path testPath = new Path("/testSetOwnerNegative");
- MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
- authorizer.init(null);
+
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
String owner = null;
@@ -723,11 +684,10 @@ public class TestNativeAzureFileSystemAuthorization
* */
@Test
public void testSetOwnerSucceedsForAuthorisedUsers() throws Throwable {
- Path testPath = new Path("/testsetownerpositive");
- MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
- authorizer.init(null);
+
+ Path testPath = new Path("/testSetOwnerPositive");
+
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
String newOwner = "newowner";
@@ -765,14 +725,14 @@ public class TestNativeAzureFileSystemAuthorization
* */
@Test
public void testSetOwnerSucceedsForAnyUserWhenWildCardIsSpecified() throws Throwable {
+
Configuration conf = fs.getConf();
conf.set(NativeAzureFileSystem.AZURE_CHOWN_USERLIST_PROPERTY_NAME, "*");
- Path testPath = new Path("/testsetownerpositivewildcard");
+ fs.setConf(conf);
+ Path testPath = new Path("/testSetOwnerPositiveWildcard");
- MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
- authorizer.init(null);
+ authorizer.init(conf);
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
String newOwner = "newowner";
@@ -809,16 +769,16 @@ public class TestNativeAzureFileSystemAuthorization
*/
@Test
public void testSetOwnerFailsForIllegalSetup() throws Throwable {
+
expectedEx.expect(IllegalArgumentException.class);
Configuration conf = fs.getConf();
conf.set(NativeAzureFileSystem.AZURE_CHOWN_USERLIST_PROPERTY_NAME, "user1, *");
+ fs.setConf(conf);
Path testPath = new Path("/testSetOwnerFailsForIllegalSetup");
- MockWasbAuthorizerImpl authorizer = new MockWasbAuthorizerImpl(fs);
- authorizer.init(null);
+ authorizer.init(conf);
authorizer.addAuthRule("/", WasbAuthorizationOperations.WRITE.toString(), true);
- authorizer.addAuthRule(testPath.toString(), WasbAuthorizationOperations.READ.toString(), true);
fs.updateWasbAuthorizer(authorizer);
String owner = null;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
index 3329e67..4bd4633 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorizationWithOwner.java
@@ -36,7 +36,7 @@ public class TestNativeAzureFileSystemAuthorizationWithOwner
@Before
public void beforeMethod() {
super.beforeMethod();
- authorizer.init(null, true);
+ authorizer.init(fs.getConf(), true);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index efda15d..393dcfd 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -344,7 +344,7 @@ public class TestWasbRemoteCallHelper
Mockito.when(mockHttpClient.execute(argThat(new HttpGetForServiceLocal())))
.thenReturn(mockHttpResponseServiceLocal);
- //Need 3 times because performop() does 3 fs operations.
+ //Need 2 times because performop() does 2 fs operations.
Mockito.when(mockHttpEntity.getContent())
.thenReturn(new ByteArrayInputStream(validJsonResponse()
.getBytes(StandardCharsets.UTF_8)))
@@ -356,8 +356,8 @@ public class TestWasbRemoteCallHelper
performop(mockHttpClient);
- Mockito.verify(mockHttpClient, times(3)).execute(Mockito.argThat(new HttpGetForServiceLocal()));
- Mockito.verify(mockHttpClient, times(3)).execute(Mockito.argThat(new HttpGetForService2()));
+ Mockito.verify(mockHttpClient, times(2)).execute(Mockito.argThat(new HttpGetForServiceLocal()));
+ Mockito.verify(mockHttpClient, times(2)).execute(Mockito.argThat(new HttpGetForService2()));
}
@Test
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
index c73d6d8..acd9459 100644
--- a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
+++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml
@@ -31,10 +31,9 @@
<property>
<name>fs.azure.secure.mode</name>
- <value>false</value>
+ <value>true</value>
</property>
-
<!-- Save the above configuration properties in a separate file named -->
<!-- azure-auth-keys.xml in the same directory as this file. -->
<!-- DO NOT ADD azure-auth-keys.xml TO REVISION CONTROL. The keys to your -->
[26/50] [abbrv] hadoop git commit: MAPREDUCE-6889. Add Job#close API
to shutdown MR client services. Contributed by Rohith Sharma K S.
Posted by xy...@apache.org.
MAPREDUCE-6889. Add Job#close API to shutdown MR client services. Contributed by Rohith Sharma K S.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb3b5d33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb3b5d33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb3b5d33
Branch: refs/heads/HDFS-7240
Commit: fb3b5d33ffb29ee8e1ffbd2eee7a603a5777ebaf
Parents: 02b141a
Author: Sunil G <su...@apache.org>
Authored: Mon Jul 17 13:35:15 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Mon Jul 17 13:35:15 2017 +0530
----------------------------------------------------------------------
.../src/main/java/org/apache/hadoop/mapreduce/Job.java | 13 ++++++++++++-
.../hadoop/mapreduce/TestMapperReducerCleanup.java | 4 ++++
2 files changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3b5d33/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 2048768..5530d95 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -75,7 +75,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
-public class Job extends JobContextImpl implements JobContext {
+public class Job extends JobContextImpl implements JobContext, AutoCloseable {
private static final Log LOG = LogFactory.getLog(Job.class);
@InterfaceStability.Evolving
@@ -1553,4 +1553,15 @@ public class Job extends JobContextImpl implements JobContext {
this.reservationId = reservationId;
}
+ /**
+ * Close the <code>Job</code>.
+ * @throws IOException if fail to close.
+ */
+ @Override
+ public void close() throws IOException {
+ if (cluster != null) {
+ cluster.close();
+ cluster = null;
+ }
+ }
}
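Because Job now implements AutoCloseable, callers can lean on try-with-resources instead of an explicit close(); a minimal sketch with the job configuration elided (illustrative names, not part of the patch):

    // close() runs on every exit path from the try block and shuts down
    // the client-side Cluster connection that Job holds.
    static boolean runJob(Configuration conf) throws Exception {
      try (Job job = Job.getInstance(conf, "example")) {
        // ... set mapper, reducer, input and output paths on job here ...
        return job.waitForCompletion(true);
      }
    }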
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3b5d33/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
index 36ec966..27e4e4f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
@@ -329,6 +329,10 @@ public class TestMapperReducerCleanup {
Assert.assertTrue(reduceCleanup);
Assert.assertTrue(recordReaderCleanup);
Assert.assertTrue(recordWriterCleanup);
+
+ Assert.assertNotNull(job.getCluster());
+ job.close();
+ Assert.assertNull(job.getCluster());
}
}
[31/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging
APIs over to slf4j in hadoop-common. Contributed by Wenxin He.
Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 4bda637..3416746 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -58,8 +58,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import javax.net.SocketFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -96,12 +94,13 @@ import org.mockito.stubbing.Answer;
import com.google.common.base.Supplier;
import com.google.common.primitives.Bytes;
import com.google.common.primitives.Ints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
/** Unit tests for IPC. */
public class TestIPC {
- public static final Log LOG =
- LogFactory.getLog(TestIPC.class);
+ public static final Logger LOG = LoggerFactory.getLogger(TestIPC.class);
private static Configuration conf;
final static int PING_INTERVAL = 1000;
@@ -230,12 +229,12 @@ public class TestIPC {
final long param = RANDOM.nextLong();
LongWritable value = call(client, param, server, conf);
if (value.get() != param) {
- LOG.fatal("Call failed!");
+ LOG.error("Call failed!");
failed = true;
break;
}
} catch (Exception e) {
- LOG.fatal("Caught: " + StringUtils.stringifyException(e));
+ LOG.error("Caught: " + StringUtils.stringifyException(e));
failed = true;
}
}
@@ -784,7 +783,7 @@ public class TestIPC {
call(client, new LongWritable(Thread.currentThread().getId()),
addr, 60000, conf);
} catch (Throwable e) {
- LOG.error(e);
+ LOG.error(e.toString());
failures.incrementAndGet();
return;
} finally {
@@ -895,7 +894,7 @@ public class TestIPC {
callBarrier.await();
}
} catch (Throwable t) {
- LOG.error(t);
+ LOG.error(t.toString());
error.set(true);
}
}
@@ -917,7 +916,7 @@ public class TestIPC {
callReturned.countDown();
Thread.sleep(10000);
} catch (IOException e) {
- LOG.error(e);
+ LOG.error(e.toString());
} catch (InterruptedException e) {
} finally {
client.stop();
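Two slf4j API differences drive the edits in TestIPC above: slf4j defines no fatal level, so LOG.fatal(...) becomes LOG.error(...), and its logging methods take a String message rather than an arbitrary Object, so a bare throwable must either be stringified or passed as the trailing Throwable argument. A sketch of the equivalents (the class and method names are illustrative):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class LevelMappingExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(LevelMappingExample.class);

    static void report(Exception e) {
      LOG.error("Call failed!");         // commons-logging fatal maps to error
      LOG.error(e.toString());           // message-only form, as in this commit
      LOG.error("Caught exception", e);  // alternative that keeps the stack trace
    }
  }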
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
index 546cb8f..7d7905e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
@@ -32,8 +32,6 @@ import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.BytesWritable;
@@ -45,6 +43,8 @@ import org.apache.hadoop.ipc.Server.Call;
import org.apache.hadoop.net.NetUtils;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This test provokes partial writes in the server, which is
@@ -52,8 +52,8 @@ import org.junit.Test;
*/
public class TestIPCServerResponder {
- public static final Log LOG =
- LogFactory.getLog(TestIPCServerResponder.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestIPCServerResponder.class);
private static Configuration conf = new Configuration();
@@ -126,7 +126,7 @@ public class TestIPCServerResponder {
call(client, param, address);
Thread.sleep(RANDOM.nextInt(20));
} catch (Exception e) {
- LOG.fatal("Caught Exception", e);
+ LOG.error("Caught Exception", e);
failed = true;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java
index f5fefe4..476b197 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpcServerHandoff.java
@@ -29,18 +29,18 @@ import java.util.concurrent.Future;
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.protobuf.TestProtos;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcHandoffProto;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestProtoBufRpcServerHandoff {
- public static final Log LOG =
- LogFactory.getLog(TestProtoBufRpcServerHandoff.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestProtoBufRpcServerHandoff.class);
@Test(timeout = 20000)
public void test() throws Exception {
@@ -83,8 +83,8 @@ public class TestProtoBufRpcServerHandoff {
ClientInvocationCallable callable1 = future1.get();
ClientInvocationCallable callable2 = future2.get();
- LOG.info(callable1);
- LOG.info(callable2);
+ LOG.info(callable1.toString());
+ LOG.info(callable2.toString());
// Ensure the 5 second sleep responses are within a reasonable time of each
// other.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 8725cf4..c6209d2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.ipc;
import com.google.common.base.Supplier;
import com.google.protobuf.ServiceException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -54,6 +52,8 @@ import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import javax.net.SocketFactory;
@@ -104,7 +104,7 @@ import static org.mockito.Mockito.verify;
@SuppressWarnings("deprecation")
public class TestRPC extends TestRpcBase {
- public static final Log LOG = LogFactory.getLog(TestRPC.class);
+ public static final Logger LOG = LoggerFactory.getLogger(TestRPC.class);
@Before
public void setup() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
index a06d9fd..ffee086 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.ipc;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.reflect.Method;
@@ -39,8 +39,8 @@ public class TestRPCCompatibility {
private static RPC.Server server;
private ProtocolProxy<?> proxy;
- public static final Log LOG =
- LogFactory.getLog(TestRPCCompatibility.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestRPCCompatibility.class);
private static Configuration conf = new Configuration();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java
index 93af7d4..aee8893 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCServerShutdown.java
@@ -19,11 +19,11 @@
package org.apache.hadoop.ipc;
import com.google.protobuf.ServiceException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
@@ -42,7 +42,8 @@ import static org.junit.Assert.fail;
@SuppressWarnings("deprecation")
public class TestRPCServerShutdown extends TestRpcBase {
- public static final Log LOG = LogFactory.getLog(TestRPCServerShutdown.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestRPCServerShutdown.class);
@Before
public void setup() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java
index 3716bc3..2e0b3da 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcServerHandoff.java
@@ -30,19 +30,19 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.net.NetUtils;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestRpcServerHandoff {
- public static final Log LOG =
- LogFactory.getLog(TestRpcServerHandoff.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestRpcServerHandoff.class);
private static final String BIND_ADDRESS = "0.0.0.0";
private static final Configuration conf = new Configuration();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index 7608cb4..0b463a5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.ipc;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -43,6 +41,8 @@ import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import javax.security.auth.callback.Callback;
@@ -120,8 +120,7 @@ public class TestSaslRPC extends TestRpcBase {
this.saslPropertiesResolver = saslPropertiesResolver;
}
- public static final Log LOG =
- LogFactory.getLog(TestSaslRPC.class);
+ public static final Logger LOG = LoggerFactory.getLogger(TestSaslRPC.class);
static final String ERROR_MESSAGE = "Token is invalid";
static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
index afda535..420d6b9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
@@ -27,12 +27,12 @@ import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.Server.Call;
import org.junit.Test;
+import org.slf4j.Logger;
/**
* This is intended to be a set of unit tests for the
@@ -136,7 +136,7 @@ public class TestServer {
public void testLogExceptions() throws Exception {
final Configuration conf = new Configuration();
final Call dummyCall = new Call(0, 0, null, null);
- Log logger = mock(Log.class);
+ Logger logger = mock(Logger.class);
Server server = new Server("0.0.0.0", 0, LongWritable.class, 1, conf) {
@Override
public Writable call(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestWeightedRoundRobinMultiplexer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestWeightedRoundRobinMultiplexer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestWeightedRoundRobinMultiplexer.java
index 6428176..d4bc06a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestWeightedRoundRobinMultiplexer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestWeightedRoundRobinMultiplexer.java
@@ -21,14 +21,15 @@ package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.ipc.WeightedRoundRobinMultiplexer.IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY;
public class TestWeightedRoundRobinMultiplexer {
- public static final Log LOG = LogFactory.getLog(TestWeightedRoundRobinMultiplexer.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestWeightedRoundRobinMultiplexer.class);
private WeightedRoundRobinMultiplexer mux;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
index 7e094ed..7bc772f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
@@ -31,8 +31,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
@@ -47,9 +45,12 @@ import org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30;
import org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31;
import org.apache.hadoop.metrics2.sink.ganglia.GangliaMetricsTestHelper;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestGangliaMetrics {
- public static final Log LOG = LogFactory.getLog(TestMetricsSystemImpl.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestMetricsSystemImpl.class);
// This is the prefix to locate the config file for this particular test
// This is to avoid using the same config file with other test cases,
// which can cause race conditions.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java
index 2a62acc..b53be4d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java
@@ -24,15 +24,16 @@ import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.configuration2.Configuration;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.apache.hadoop.metrics2.impl.ConfigUtil.*;
/**
* Test metrics configuration
*/
public class TestMetricsConfig {
- static final Log LOG = LogFactory.getLog(TestMetricsConfig.class);
+ static final Logger LOG = LoggerFactory.getLogger(TestMetricsConfig.class);
/**
* Common use cases
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
index cfea104..abd1b13 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
@@ -42,8 +42,6 @@ import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import static org.apache.hadoop.test.MoreAsserts.*;
@@ -61,14 +59,17 @@ import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Test the MetricsSystemImpl class
*/
@RunWith(MockitoJUnitRunner.class)
public class TestMetricsSystemImpl {
- private static final Log LOG = LogFactory.getLog(TestMetricsSystemImpl.class);
-
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestMetricsSystemImpl.class);
+
static { DefaultMetricsSystem.setMiniClusterMode(true); }
@Captor private ArgumentCaptor<MetricsRecord> r1;
@@ -78,7 +79,7 @@ public class TestMetricsSystemImpl {
public static class TestSink implements MetricsSink {
@Override public void putMetrics(MetricsRecord record) {
- LOG.debug(record);
+ LOG.debug(record.toString());
}
@Override public void flush() {}
@@ -420,7 +421,7 @@ public class TestMetricsSystemImpl {
}
private void checkMetricsRecords(List<MetricsRecord> recs) {
- LOG.debug(recs);
+ LOG.debug(recs.toString());
MetricsRecord r = recs.get(0);
assertEquals("name", "s1rec", r.name());
assertEquals("tags", new MetricsTag[] {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java
index 7da8d1b..719130f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java
@@ -22,18 +22,20 @@ import java.util.ConcurrentModificationException;
import java.util.concurrent.CountDownLatch;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import static org.apache.hadoop.metrics2.impl.SinkQueue.*;
/**
* Test the half-blocking metrics sink queue
*/
public class TestSinkQueue {
- private static final Log LOG = LogFactory.getLog(TestSinkQueue.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestSinkQueue.class);
/**
* Test common use case
@@ -234,7 +236,7 @@ public class TestSinkQueue {
callback.run();
}
catch (ConcurrentModificationException e) {
- LOG.info(e);
+ LOG.info(e.toString());
return;
}
LOG.error("should've thrown CME");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
index fd716ae..b0d7deb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
@@ -34,18 +34,19 @@ import java.util.Map.Entry;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.Quantile;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Test metrics record builder interface and mutable metrics
*/
public class TestMutableMetrics {
- private static final Log LOG = LogFactory.getLog(TestMutableMetrics.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestMutableMetrics.class);
private final double EPSILON = 1e-42;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestMetricsCache.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestMetricsCache.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestMetricsCache.java
index 7bee3a2..e69947e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestMetricsCache.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/util/TestMetricsCache.java
@@ -25,15 +25,17 @@ import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.apache.hadoop.metrics2.lib.Interns.*;
public class TestMetricsCache {
- private static final Log LOG = LogFactory.getLog(TestMetricsCache.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestMetricsCache.class);
@SuppressWarnings("deprecation")
@Test public void testUpdate() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index a294e74..80f2ebc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -18,16 +18,17 @@
package org.apache.hadoop.net;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.IOException;
import java.net.ServerSocket;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
public class ServerSocketUtil {
- private static final Log LOG = LogFactory.getLog(ServerSocketUtil.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ServerSocketUtil.class);
private static Random rand = new Random();
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
index 863d380..3aa0acd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
@@ -28,11 +28,11 @@ import java.net.InetAddress;
import javax.naming.CommunicationException;
import javax.naming.NameNotFoundException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.hamcrest.CoreMatchers.not;
@@ -44,7 +44,7 @@ import static org.junit.Assert.*;
*/
public class TestDNS {
- private static final Log LOG = LogFactory.getLog(TestDNS.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestDNS.class);
private static final String DEFAULT = "default";
// This is not a legal hostname (starts with a hyphen). It will never
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index 1375d9b..fc1c102 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -38,8 +38,6 @@ import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.NetUtilsTestResolver;
@@ -47,10 +45,12 @@ import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestNetUtils {
- private static final Log LOG = LogFactory.getLog(TestNetUtils.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestNetUtils.class);
private static final int DEST_PORT = 4040;
private static final String DEST_PORT_NAME = Integer.toString(DEST_PORT);
private static final int LOCAL_PORT = 8080;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
index 649ba12..f1c03cf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
@@ -25,8 +25,6 @@ import java.net.SocketTimeoutException;
import java.nio.channels.Pipe;
import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
@@ -36,6 +34,9 @@ import org.apache.hadoop.util.Shell;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.junit.Assert.*;
/**
@@ -47,7 +48,8 @@ import static org.junit.Assert.*;
*/
public class TestSocketIOWithTimeout {
- static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
+ static final Logger LOG =
+ LoggerFactory.getLogger(TestSocketIOWithTimeout.class);
private static int TIMEOUT = 1*1000;
private static String TEST_STRING = "1234567890";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java
index f3c0a5c..a906c4a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.net;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
@@ -34,7 +34,8 @@ import java.util.Map;
* Because the map is actually static, this map needs to be reset for every test
*/
public class TestStaticMapping extends Assert {
- private static final Log LOG = LogFactory.getLog(TestStaticMapping.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestStaticMapping.class);
/**
* Reset the map then create a new instance of the {@link StaticMapping}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
index 8a5a0a4..28b3cbe 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
@@ -203,7 +203,7 @@ public class TestDomainSocket {
} catch (IOException e) {
throw new RuntimeException("unexpected IOException", e);
} finally {
- IOUtils.cleanup(DomainSocket.LOG, serverConn);
+ IOUtils.cleanupWithLogger(DomainSocket.LOG, serverConn);
}
return null;
}
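IOUtils.cleanup takes a commons-logging Log, so call sites whose LOG field has moved to slf4j switch to the cleanupWithLogger overload, which accepts an org.slf4j.Logger. A sketch of the call shape (the helper method is illustrative):

  import java.io.Closeable;
  import org.apache.hadoop.io.IOUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class CleanupExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(CleanupExample.class);

    static void closeQuietly(Closeable conn) {
      // Closes the resource, logging (rather than throwing) any IOException.
      IOUtils.cleanupWithLogger(LOG, conn);
    }
  }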
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
index 4cc86a7..aa522f2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
@@ -26,17 +26,18 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import com.google.common.util.concurrent.Uninterruptibles;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestDomainSocketWatcher {
- static final Log LOG = LogFactory.getLog(TestDomainSocketWatcher.class);
+ static final Logger LOG =
+ LoggerFactory.getLogger(TestDomainSocketWatcher.class);
private Throwable trappedException = null;
@@ -141,7 +142,7 @@ public class TestDomainSocketWatcher {
}
}
} catch (Throwable e) {
- LOG.error(e);
+ LOG.error(e.toString());
throw new RuntimeException(e);
}
}
@@ -169,7 +170,7 @@ public class TestDomainSocketWatcher {
}
}
} catch (Throwable e) {
- LOG.error(e);
+ LOG.error(e.toString());
throw new RuntimeException(e);
}
}
@@ -212,7 +213,7 @@ public class TestDomainSocketWatcher {
TimeUnit.MILLISECONDS.sleep(1);
}
} catch (Throwable e) {
- LOG.error(e);
+ LOG.error(e.toString());
throw new RuntimeException(e);
}
}
@@ -241,7 +242,7 @@ public class TestDomainSocketWatcher {
}
}
} catch (Throwable e) {
- LOG.error(e);
+ LOG.error(e.toString());
throw new RuntimeException(e);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java
index 79f56e0..0a2d42c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java
@@ -24,16 +24,17 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestCompositeGroupMapping {
- public static final Log LOG = LogFactory.getLog(TestCompositeGroupMapping.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestCompositeGroupMapping.class);
private static Configuration conf = new Configuration();
private static class TestUser {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
index c4dbcac..3293903 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.security;
import com.google.protobuf.ServiceException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
@@ -34,6 +32,8 @@ import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetAddress;
@@ -58,8 +58,8 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
private static final Configuration masterConf = new Configuration();
- public static final Log LOG = LogFactory
- .getLog(TestDoAsEffectiveUser.class);
+ public static final Logger LOG = LoggerFactory
+ .getLogger(TestDoAsEffectiveUser.class);
static {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
index 85f17b1..3ef3698 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
@@ -21,16 +21,17 @@ import static org.junit.Assert.assertTrue;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
public class TestGroupFallback {
- public static final Log LOG = LogFactory.getLog(TestGroupFallback.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestGroupFallback.class);
@Test
public void testGroupShell() throws Exception {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 930c45e..46e36b3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -40,17 +40,17 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestGroupsCaching {
- public static final Log TESTLOG = LogFactory.getLog(TestGroupsCaching.class);
+ public static final Logger TESTLOG =
+ LoggerFactory.getLogger(TestGroupsCaching.class);
private static String[] myGroups = {"grp1", "grp2"};
private Configuration conf;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
index 6d9ea08..d3c9538 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.security;
import java.io.IOException;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.test.GenericTestUtils;
@@ -30,6 +28,8 @@ import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.*;
import static org.mockito.Mockito.doNothing;
@@ -38,8 +38,8 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestShellBasedUnixGroupsMapping {
- private static final Log TESTLOG =
- LogFactory.getLog(TestShellBasedUnixGroupsMapping.class);
+ private static final Logger TESTLOG =
+ LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class);
private final GenericTestUtils.LogCapturer shellMappingLog =
GenericTestUtils.LogCapturer.captureLogs(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
index 354dade..6fa5992 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
@@ -23,8 +23,6 @@ import java.net.URI;
import java.util.List;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -39,13 +37,16 @@ import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestCredentialProviderFactory {
- public static final Log LOG = LogFactory.getLog(TestCredentialProviderFactory.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestCredentialProviderFactory.class);
@Rule
public final TestName test = new TestName();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
index ddf74d1..0868381 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
@@ -26,8 +26,6 @@ import java.util.Collection;
import java.util.Iterator;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,8 @@ import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
@@ -45,8 +45,8 @@ import static org.mockito.Mockito.verify;
@InterfaceStability.Evolving
public class TestAccessControlList {
- private static final Log LOG =
- LogFactory.getLog(TestAccessControlList.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestAccessControlList.class);
/**
* Test the netgroups (groups in ACL rules that start with @)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
index 577f11b..9061fe7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
@@ -25,8 +25,6 @@ import java.security.SecureRandom;
import java.util.Arrays;
import java.util.Collection;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.Groups;
@@ -34,11 +32,13 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestProxyUsers {
- private static final Log LOG =
- LogFactory.getLog(TestProxyUsers.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestProxyUsers.class);
private static final String REAL_USER_NAME = "proxier";
private static final String PROXY_USER_NAME = "proxied_user";
private static final String AUTHORIZED_PROXY_USER_NAME = "authorized_proxied_user";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
index b41ff15..ad12f0b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
@@ -32,8 +32,6 @@ import java.util.Map;
import org.junit.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
@@ -49,11 +47,14 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecret
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.*;
public class TestDelegationToken {
- private static final Log LOG = LogFactory.getLog(TestDelegationToken.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestDelegationToken.class);
private static final Text KIND = new Text("MY KIND");
public static class TestDelegationTokenIdentifier
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
index 6189c0e..ad3dfcf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.service;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service.STATE;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -36,7 +36,8 @@ public class TestCompositeService {
private static final int FAILED_SERVICE_SEQ_NUMBER = 2;
- private static final Log LOG = LogFactory.getLog(TestCompositeService.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestCompositeService.class);
/**
* flag to state policy of CompositeService, and hence
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java
index cf9ca32..f72e130 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java
@@ -19,8 +19,6 @@
package org.apache.hadoop.service;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.LoggingStateChangeListener;
@@ -28,9 +26,12 @@ import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;
import org.apache.hadoop.service.ServiceStateException;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestServiceLifecycle extends ServiceAssert {
- private static Log LOG = LogFactory.getLog(TestServiceLifecycle.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestServiceLifecycle.class);
/**
* Walk the {@link BreakableService} through it's lifecycle,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java
index b2f6054..caa65c5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java
@@ -32,8 +32,6 @@ import org.mockito.invocation.InvocationOnMock;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
@@ -42,6 +40,8 @@ import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.util.Quantile;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.metrics2.lib.Interns.*;
@@ -50,7 +50,7 @@ import static org.apache.hadoop.metrics2.lib.Interns.*;
*/
public class MetricsAsserts {
- final static Log LOG = LogFactory.getLog(MetricsAsserts.class);
+ final static Logger LOG = LoggerFactory.getLogger(MetricsAsserts.class);
private static final double EPSILON = 0.00001;
public static MetricsSystem mockMetricsSystem() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
index b51329f..217c2f8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java
@@ -20,9 +20,9 @@ package org.apache.hadoop.test;
import java.util.HashSet;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A utility to easily test threaded/synchronized code.
@@ -60,8 +60,8 @@ import org.apache.hadoop.util.Time;
*/
public abstract class MultithreadedTestUtil {
- public static final Log LOG =
- LogFactory.getLog(MultithreadedTestUtil.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(MultithreadedTestUtil.class);
/**
* TestContext is used to setup the multithreaded test runner.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index c1d45cc..fb7bd22 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -18,9 +18,6 @@
package org.apache.hadoop.test;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import org.junit.Test;
import org.slf4j.Logger;
@@ -90,7 +87,7 @@ public class TestGenericTestUtils extends GenericTestUtils {
@Test(timeout = 10000)
public void testLogCapturer() {
- final Log log = LogFactory.getLog(TestGenericTestUtils.class);
+ final Logger log = LoggerFactory.getLogger(TestGenericTestUtils.class);
LogCapturer logCapturer = LogCapturer.captureLogs(log);
final String infoMessage = "info message";
// test get output message
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java
index d6ae04d..4c0b965 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestJUnitSetup.java
@@ -18,13 +18,14 @@
package org.apache.hadoop.test;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestJUnitSetup {
- public static final Log LOG = LogFactory.getLog(TestJUnitSetup.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestJUnitSetup.class);
@Test
public void testJavaAssert() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
index 075ef69..58935f2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
@@ -19,17 +19,18 @@ package org.apache.hadoop.util;
import junit.framework.TestCase;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.AsyncDiskService;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A test for AsyncDiskService.
*/
public class TestAsyncDiskService extends TestCase {
- public static final Log LOG = LogFactory.getLog(TestAsyncDiskService.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestAsyncDiskService.class);
// Access by multiple threads from the ThreadPools in AsyncDiskService.
volatile int count;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java
index a38c3d7..529887b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java
@@ -28,21 +28,22 @@ import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Tests covering the classpath command-line utility.
*/
public class TestClasspath {
- private static final Log LOG = LogFactory.getLog(TestClasspath.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestClasspath
+ .class);
private static final File TEST_DIR = GenericTestUtils.getTestDir(
"TestClasspath");
private static final Charset UTF8 = Charset.forName("UTF-8");
@@ -75,7 +76,7 @@ public class TestClasspath {
public void tearDown() {
System.setOut(oldStdout);
System.setErr(oldStderr);
- IOUtils.cleanup(LOG, printStdout, printStderr);
+ IOUtils.cleanupWithLogger(LOG, printStdout, printStderr);
assertTrue(FileUtil.fullyDelete(TEST_DIR));
}
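One non-mechanical change rides along here: IOUtils.cleanup(Log, ...) cannot accept an SLF4J Logger, so the call moves to the cleanupWithLogger overload the patch uses. A minimal sketch of its use (CleanupDemo and the stream names are illustrative):

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupDemo {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupDemo.class);

  static void closeQuietly(Closeable out, Closeable err) {
    // Closes each stream, logging (rather than throwing) any IOException.
    IOUtils.cleanupWithLogger(LOG, out, err);
  }
}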
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
index 28389c2..3a4ebd5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
@@ -20,17 +20,18 @@ package org.apache.hadoop.util;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import junit.framework.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.FindClass;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Test the find class logic
*/
public class TestFindClass extends Assert {
- private static final Log LOG = LogFactory.getLog(TestFindClass.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestFindClass.class);
public static final String LOG4J_PROPERTIES = "log4j.properties";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java
index bd74855..2c27b76 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java
@@ -23,14 +23,15 @@ import java.util.List;
import org.junit.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.IdentityHashStore;
import org.apache.hadoop.util.IdentityHashStore.Visitor;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestIdentityHashStore {
- private static final Log LOG = LogFactory.getLog(TestIdentityHashStore.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestIdentityHashStore.class.getName());
private static class Key {
private final String name;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java
index 671dd37..3751253 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java
@@ -21,15 +21,16 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** Testing {@link LightWeightGSet} */
public class TestLightWeightGSet {
- public static final Log LOG = LogFactory.getLog(TestLightWeightGSet.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestLightWeightGSet.class);
private static ArrayList<Integer> getRandomList(int length, int randomSeed) {
Random random = new Random(randomSeed);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightResizableGSet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightResizableGSet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightResizableGSet.java
index 3250092..19f213d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightResizableGSet.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightResizableGSet.java
@@ -23,15 +23,16 @@ import java.util.Iterator;
import java.util.Random;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.*;
/** Testing {@link LightWeightResizableGSet} */
public class TestLightWeightResizableGSet {
- public static final Log LOG = LogFactory.getLog(TestLightWeightResizableGSet.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestLightWeightResizableGSet.class);
private Random random = new Random();
private TestElement[] generateElements(int length) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
index 473c177..58874fd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
@@ -20,16 +20,16 @@ package org.apache.hadoop.util;
import org.junit.Test;
import static org.junit.Assert.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.crypto.OpensslCipher;
import org.apache.hadoop.io.compress.Lz4Codec;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestNativeCodeLoader {
- static final Log LOG = LogFactory.getLog(TestNativeCodeLoader.class);
+ static final Logger LOG = LoggerFactory.getLogger(TestNativeCodeLoader.class);
private static boolean requireTestJni() {
String rtj = System.getProperty("require.test.libhadoop");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
index 73323ea..a9fa4c6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
@@ -19,21 +19,22 @@
package org.apache.hadoop.util;
import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestSignalLogger {
- public static final Log LOG = LogFactory.getLog(TestSignalLogger.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestSignalLogger.class);
@Test(timeout=60000)
public void testInstall() throws Exception {
Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
- SignalLogger.INSTANCE.register(LOG);
+ SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
try {
- SignalLogger.INSTANCE.register(LOG);
+ SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
Assert.fail("expected IllegalStateException from double registration");
} catch (IllegalStateException e) {
// fall through
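Besides the Logger swap, the register() calls change because the SLF4J Logger is wrapped through Hadoop's LogAdapter shim via LogAdapter.create(LOG) rather than passed directly. The behavior under test is the double-registration guard; a minimal sketch of that idiom (OnceRegistrar is an illustrative stand-in, not Hadoop code):

enum OnceRegistrar {
  INSTANCE;
  private boolean registered = false;

  synchronized void register() {
    // A second call fails, mirroring the IllegalStateException the
    // test expects from SignalLogger's double registration.
    if (registered) {
      throw new IllegalStateException("already registered");
    }
    registered = true;
  }
}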
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
index e45890c..baf4251 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
@@ -29,13 +29,13 @@ import java.io.FileWriter;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.hamcrest.CoreMatchers.*;
@@ -44,7 +44,7 @@ import static org.hamcrest.CoreMatchers.*;
*/
public class TestWinUtils {
- private static final Log LOG = LogFactory.getLog(TestWinUtils.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestWinUtils.class);
private static File TEST_DIR = GenericTestUtils.getTestDir(
TestWinUtils.class.getSimpleName());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 1ef0085..8c13b4f 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.mount;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleUdpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.util.ExitUtil.terminate;
@@ -37,7 +37,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
* handle for requested directory and returns it to the client.
*/
abstract public class MountdBase {
- public static final Log LOG = LogFactory.getLog(MountdBase.class);
+ public static final Logger LOG = LoggerFactory.getLogger(MountdBase.class);
private final RpcProgram rpcProgram;
private int udpBoundPort; // Will set after server starts
private int tcpBoundPort; // Will set after server starts
@@ -63,7 +63,7 @@ abstract public class MountdBase {
try {
udpServer.run();
} catch (Throwable e) {
- LOG.fatal("Failed to start the UDP server.", e);
+ LOG.error("Failed to start the UDP server.", e);
if (udpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP,
udpServer.getBoundPort());
@@ -82,7 +82,7 @@ abstract public class MountdBase {
try {
tcpServer.run();
} catch (Throwable e) {
- LOG.fatal("Failed to start the TCP server.", e);
+ LOG.error("Failed to start the TCP server.", e);
if (tcpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
tcpServer.getBoundPort());
@@ -103,7 +103,7 @@ abstract public class MountdBase {
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
} catch (Throwable e) {
- LOG.fatal("Failed to register the MOUNT service.", e);
+ LOG.error("Failed to register the MOUNT service.", e);
terminate(1, e);
}
}
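The fatal-to-error changes are forced by the API: SLF4J deliberately omits a FATAL level, on the argument that the logger should not decide an error is unrecoverable; the code signals that itself by terminating, as the terminate(1, e) call above does. A minimal sketch of the idiom (FatalToError and the exit call are illustrative):

public class FatalToError {
  private static final org.slf4j.Logger LOG =
      org.slf4j.LoggerFactory.getLogger(FatalToError.class);

  static void startOrDie(Runnable server) {
    try {
      server.run();
    } catch (Throwable e) {
      // SLF4J's highest level is ERROR; keep the old FATAL semantics
      // by terminating explicitly after logging (this code path uses
      // ExitUtil.terminate(1, e) for that).
      LOG.error("Failed to start the server.", e);
      System.exit(1);
    }
  }
}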
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
index a299ff0..3d5088d 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
@@ -22,8 +22,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
import org.apache.hadoop.conf.Configuration;
@@ -35,6 +33,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class provides functionality for loading and checking the mapping
@@ -64,7 +64,7 @@ public class NfsExports {
return exports;
}
- public static final Log LOG = LogFactory.getLog(NfsExports.class);
+ public static final Logger LOG = LoggerFactory.getLogger(NfsExports.class);
// only support IPv4 now
private static final String IP_ADDRESS =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
index bff5eec..5b32798 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
@@ -23,9 +23,9 @@ import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.XDR;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This is a file handle use by the NFS clients.
@@ -33,7 +33,7 @@ import org.apache.hadoop.oncrpc.XDR;
* on subsequent operations to reference the file.
*/
public class FileHandle {
- private static final Log LOG = LogFactory.getLog(FileHandle.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FileHandle.class);
private static final String HEXES = "0123456789abcdef";
private static final int HANDLE_LEN = 32;
private byte[] handle; // Opaque handle
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index 80faca5..00e6d9f 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -17,13 +17,13 @@
*/
package org.apache.hadoop.nfs.nfs3;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.util.ExitUtil.terminate;
@@ -32,7 +32,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
* Only TCP server is supported and UDP is not supported.
*/
public abstract class Nfs3Base {
- public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
+ public static final Logger LOG = LoggerFactory.getLogger(Nfs3Base.class);
private final RpcProgram rpcProgram;
private int nfsBoundPort; // Will set after server starts
@@ -54,7 +54,7 @@ public abstract class Nfs3Base {
try {
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
} catch (Throwable e) {
- LOG.fatal("Failed to register the NFSv3 service.", e);
+ LOG.error("Failed to register the NFSv3 service.", e);
terminate(1, e);
}
}
@@ -67,7 +67,7 @@ public abstract class Nfs3Base {
try {
tcpServer.run();
} catch (Throwable e) {
- LOG.fatal("Failed to start the TCP server.", e);
+ LOG.error("Failed to start the TCP server.", e);
if (tcpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
tcpServer.getBoundPort());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
index 7ba37c9..c8528ba 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
@@ -19,18 +19,19 @@ package org.apache.hadoop.oncrpc;
import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A simple client that registers an RPC program with portmap.
*/
public class RegistrationClient extends SimpleTcpClient {
- public static final Log LOG = LogFactory.getLog(RegistrationClient.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(RegistrationClient.class);
public RegistrationClient(String host, int port, XDR request) {
super(host, port, request);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java
index aa4b948..04fddec 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java
@@ -17,18 +17,18 @@
*/
package org.apache.hadoop.oncrpc;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.security.Credentials;
import org.apache.hadoop.oncrpc.security.Verifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Represents an RPC message of type RPC call as defined in RFC 1831
*/
public class RpcCall extends RpcMessage {
public static final int RPC_VERSION = 2;
- private static final Log LOG = LogFactory.getLog(RpcCall.class);
-
+ private static final Logger LOG = LoggerFactory.getLogger(RpcCall.class);
+
public static RpcCall read(XDR xdr) {
return new RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()),
xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(),
@@ -60,7 +60,7 @@ public class RpcCall extends RpcMessage {
this.credentials = credential;
this.verifier = verifier;
if (LOG.isTraceEnabled()) {
- LOG.trace(this);
+ LOG.trace(this.toString());
}
validate();
}
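This hunk shows a subtler part of the migration: commons-logging's trace(Object) accepted any object, but SLF4J's trace() takes a String, so the receiver must be converted explicitly. A minimal sketch (TraceDemo is illustrative):

public class TraceDemo {
  private static final org.slf4j.Logger LOG =
      org.slf4j.LoggerFactory.getLogger(TraceDemo.class);

  @Override
  public String toString() { return "TraceDemo{}"; }

  void logSelf() {
    // commons-logging allowed LOG.trace(this); SLF4J requires a String.
    if (LOG.isTraceEnabled()) {
      LOG.trace(this.toString());
    }
  }
}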
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index c541cd6..5c059aa 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
import java.net.SocketAddress;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.apache.hadoop.oncrpc.security.Verifier;
import org.apache.hadoop.oncrpc.security.VerifierNone;
@@ -35,13 +33,15 @@ import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Class for writing RPC server programs based on RFC 1050. Extend this class
* and implement {@link #handleInternal} to handle the requests received.
*/
public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
- static final Log LOG = LogFactory.getLog(RpcProgram.class);
+ static final Logger LOG = LoggerFactory.getLogger(RpcProgram.class);
public static final int RPCB_PORT = 111;
private final String program;
private final String host;
[06/50] [abbrv] hadoop git commit: YARN-6775. CapacityScheduler:
Improvements to assignContainers,
avoid unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)
Posted by xy...@apache.org.
YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)
Change-Id: I84ccd54200ccbaae23018ef320028e42b4c3509a
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/945c0958
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/945c0958
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/945c0958
Branch: refs/heads/HDFS-7240
Commit: 945c0958bb8df3dd9d5f1467f1216d2e6b0ee3d8
Parents: b61ab85
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Jul 13 10:30:15 2017 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Jul 13 10:30:15 2017 -0700
----------------------------------------------------------------------
.../scheduler/activities/ActivitiesLogger.java | 33 +++--
.../scheduler/capacity/LeafQueue.java | 83 ++++++++---
.../capacity/TestCapacityScheduler.java | 146 ++++++++++++++++++-
.../scheduler/capacity/TestLeafQueue.java | 10 +-
4 files changed, 231 insertions(+), 41 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
index 3f8ed55..12aff02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
@@ -63,9 +63,14 @@ public class ActivitiesLogger {
SchedulerApplicationAttempt application, Priority priority,
String diagnostic) {
String type = "app";
- recordActivity(activitiesManager, node, application.getQueueName(),
- application.getApplicationId().toString(), priority,
- ActivityState.REJECTED, diagnostic, type);
+ if (activitiesManager == null) {
+ return;
+ }
+ if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+ recordActivity(activitiesManager, node, application.getQueueName(),
+ application.getApplicationId().toString(), priority,
+ ActivityState.REJECTED, diagnostic, type);
+ }
finishSkippedAppAllocationRecording(activitiesManager,
application.getApplicationId(), ActivityState.REJECTED, diagnostic);
}
@@ -203,8 +208,13 @@ public class ActivitiesLogger {
public static void recordQueueActivity(ActivitiesManager activitiesManager,
SchedulerNode node, String parentQueueName, String queueName,
ActivityState state, String diagnostic) {
- recordActivity(activitiesManager, node, parentQueueName, queueName, null,
- state, diagnostic, null);
+ if (activitiesManager == null) {
+ return;
+ }
+ if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+ recordActivity(activitiesManager, node, parentQueueName, queueName,
+ null, state, diagnostic, null);
+ }
}
}
@@ -266,13 +276,10 @@ public class ActivitiesLogger {
private static void recordActivity(ActivitiesManager activitiesManager,
SchedulerNode node, String parentName, String childName,
Priority priority, ActivityState state, String diagnostic, String type) {
- if (activitiesManager == null) {
- return;
- }
- if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
- activitiesManager.addSchedulingActivityForNode(node.getNodeID(),
- parentName, childName, priority != null ? priority.toString() : null,
- state, diagnostic, type);
- }
+
+ activitiesManager.addSchedulingActivityForNode(node.getNodeID(), parentName,
+ childName, priority != null ? priority.toString() : null, state,
+ diagnostic, type);
+
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 013a5ac..2e502b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1026,6 +1026,8 @@ public class LeafQueue extends AbstractCSQueue {
return CSAssignment.NULL_ASSIGNMENT;
}
+ Map<String, CachedUserLimit> userLimits = new HashMap<>();
+ boolean needAssignToQueueCheck = true;
for (Iterator<FiCaSchedulerApp> assignmentIterator =
orderingPolicy.getAssignmentIterator();
assignmentIterator.hasNext(); ) {
@@ -1035,24 +1037,50 @@ public class LeafQueue extends AbstractCSQueue {
node.getNodeID(), SystemClock.getInstance().getTime(), application);
// Check queue max-capacity limit
- if (!super.canAssignToThisQueue(clusterResource, ps.getPartition(),
- currentResourceLimits, application.getCurrentReservation(),
- schedulingMode)) {
- ActivitiesLogger.APP.recordRejectedAppActivityFromLeafQueue(
- activitiesManager, node, application, application.getPriority(),
- ActivityDiagnosticConstant.QUEUE_MAX_CAPACITY_LIMIT);
- ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
- getParent().getQueueName(), getQueueName(), ActivityState.SKIPPED,
- ActivityDiagnosticConstant.EMPTY);
- return CSAssignment.NULL_ASSIGNMENT;
+ Resource appReserved = application.getCurrentReservation();
+ if (needAssignToQueueCheck) {
+ if (!super.canAssignToThisQueue(clusterResource, node.getPartition(),
+ currentResourceLimits, appReserved, schedulingMode)) {
+ ActivitiesLogger.APP.recordRejectedAppActivityFromLeafQueue(
+ activitiesManager, node, application, application.getPriority(),
+ ActivityDiagnosticConstant.QUEUE_MAX_CAPACITY_LIMIT);
+ ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
+ getParent().getQueueName(), getQueueName(), ActivityState.SKIPPED,
+ ActivityDiagnosticConstant.EMPTY);
+ return CSAssignment.NULL_ASSIGNMENT;
+ }
+ // If there was no reservation and canAssignToThisQueue returned
+ // true, there is no reason to check further.
+ if (!this.reservationsContinueLooking
+ || appReserved.equals(Resources.none())) {
+ needAssignToQueueCheck = false;
+ }
}
+ CachedUserLimit cul = userLimits.get(application.getUser());
+ Resource cachedUserLimit = null;
+ if (cul != null) {
+ cachedUserLimit = cul.userLimit;
+ }
Resource userLimit = computeUserLimitAndSetHeadroom(application,
- clusterResource, ps.getPartition(), schedulingMode);
-
+ clusterResource, ps.getPartition(), schedulingMode, cachedUserLimit);
+ if (cul == null) {
+ cul = new CachedUserLimit(userLimit);
+ userLimits.put(application.getUser(), cul);
+ }
// Check user limit
- if (!canAssignToUser(clusterResource, application.getUser(), userLimit,
- application, ps.getPartition(), currentResourceLimits)) {
+ boolean userAssignable = true;
+ if (!cul.canAssign && Resources.fitsIn(appReserved, cul.reservation)) {
+ userAssignable = false;
+ } else {
+ userAssignable = canAssignToUser(clusterResource, application.getUser(),
+ userLimit, application, node.getPartition(), currentResourceLimits);
+ if (!userAssignable && Resources.fitsIn(cul.reservation, appReserved)) {
+ cul.canAssign = false;
+ cul.reservation = appReserved;
+ }
+ }
+ if (!userAssignable) {
application.updateAMContainerDiagnostics(AMState.ACTIVATED,
"User capacity has reached its maximum limit.");
ActivitiesLogger.APP.recordRejectedAppActivityFromLeafQueue(
@@ -1127,7 +1155,7 @@ public class LeafQueue extends AbstractCSQueue {
// check user-limit
Resource userLimit = computeUserLimitAndSetHeadroom(app, cluster, p,
- allocation.getSchedulingMode());
+ allocation.getSchedulingMode(), null);
// Deduct resources that we can release
Resource usedResource = Resources.clone(getUser(username).getUsed(p));
@@ -1332,19 +1360,20 @@ public class LeafQueue extends AbstractCSQueue {
@Lock({LeafQueue.class})
Resource computeUserLimitAndSetHeadroom(FiCaSchedulerApp application,
Resource clusterResource, String nodePartition,
- SchedulingMode schedulingMode) {
+ SchedulingMode schedulingMode, Resource userLimit) {
String user = application.getUser();
User queueUser = getUser(user);
// Compute user limit respect requested labels,
// TODO, need consider headroom respect labels also
- Resource userLimit =
- getResourceLimitForActiveUsers(application.getUser(), clusterResource,
- nodePartition, schedulingMode);
-
+ if (userLimit == null) {
+ userLimit = getResourceLimitForActiveUsers(application.getUser(),
+ clusterResource, nodePartition, schedulingMode);
+ }
setQueueResourceLimitsInfo(clusterResource);
Resource headroom =
+ metrics.getUserMetrics(user) == null ? Resources.none() :
getHeadroom(queueUser, cachedResourceLimitsForHeadroom.getLimit(),
clusterResource, userLimit, nodePartition);
@@ -1352,7 +1381,7 @@ public class LeafQueue extends AbstractCSQueue {
LOG.debug("Headroom calculation for user " + user + ": " + " userLimit="
+ userLimit + " queueMaxAvailRes="
+ cachedResourceLimitsForHeadroom.getLimit() + " consumed="
- + queueUser.getUsed() + " headroom=" + headroom + " partition="
+ + queueUser.getUsed() + " partition="
+ nodePartition);
}
@@ -1713,7 +1742,7 @@ public class LeafQueue extends AbstractCSQueue {
.getSchedulableEntities()) {
computeUserLimitAndSetHeadroom(application, clusterResource,
RMNodeLabelsManager.NO_LABEL,
- SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+ SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, null);
}
} finally {
writeLock.unlock();
@@ -2052,4 +2081,14 @@ public class LeafQueue extends AbstractCSQueue {
public Set<String> getAllUsers() {
return this.getUsersManager().getUsers().keySet();
}
+
+ static class CachedUserLimit {
+ final Resource userLimit;
+ boolean canAssign = true;
+ Resource reservation = Resources.none();
+
+ CachedUserLimit(Resource userLimit) {
+ this.userLimit = userLimit;
+ }
+ }
}
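The CachedUserLimit class added at the end is the heart of the change: the per-user limit is computed once per assignContainers pass and reused across that user's applications, and a failed user-limit check is remembered together with the reservation that caused it. A hedged sketch of the memoization half (simplified types and illustrative names, not the real scheduler code):

import java.util.HashMap;
import java.util.Map;

class UserLimitCache {
  static final class Entry {
    final long userLimit;      // computed once per scheduling pass
    boolean canAssign = true;  // memo of a failed user-limit check
    Entry(long userLimit) { this.userLimit = userLimit; }
  }

  private final Map<String, Entry> cache = new HashMap<>();

  long limitFor(String user) {
    // computeIfAbsent keeps the expensive computation to one call
    // per user per pass, no matter how many apps that user has.
    return cache.computeIfAbsent(user,
        u -> new Entry(computeUserLimit(u))).userLimit;
  }

  private long computeUserLimit(String user) {
    return 1024L; // stand-in for the real, expensive calculation
  }
}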
http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index ba0f906..41a7ce8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -33,9 +33,11 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.PriorityQueue;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
@@ -50,6 +52,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.LocalConfigurationProvider;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -90,7 +93,6 @@ import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
import org.apache.hadoop.yarn.server.resourcemanager.Application;
-import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
@@ -156,8 +158,12 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
+import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
@@ -3492,6 +3498,7 @@ public class TestCapacityScheduler {
rm.stop();
}
+
@Test
public void testHeadRoomCalculationWithDRC() throws Exception {
// test with total cluster resource of 20GB memory and 20 vcores.
@@ -4074,6 +4081,143 @@ public class TestCapacityScheduler {
rm.stop();
}
+ @Test (timeout = 300000)
+ public void testUserLimitThroughput() throws Exception {
+ // Since this is more of a performance unit test, only run if
+ // RunUserLimitThroughput is set (-DRunUserLimitThroughput=true)
+ Assume.assumeTrue(Boolean.valueOf(
+ System.getProperty("RunUserLimitThroughput")));
+
+ CapacitySchedulerConfiguration csconf =
+ new CapacitySchedulerConfiguration();
+ csconf.setMaximumApplicationMasterResourcePerQueuePercent("root", 100.0f);
+ csconf.setMaximumAMResourcePercentPerPartition("root", "", 100.0f);
+ csconf.setMaximumApplicationMasterResourcePerQueuePercent("root.default",
+ 100.0f);
+ csconf.setMaximumAMResourcePercentPerPartition("root.default", "", 100.0f);
+ csconf.setResourceComparator(DominantResourceCalculator.class);
+
+ YarnConfiguration conf = new YarnConfiguration(csconf);
+ conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ ResourceScheduler.class);
+
+ MockRM rm = new MockRM(conf);
+ rm.start();
+
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+ LeafQueue qb = (LeafQueue)cs.getQueue("default");
+
+ // For now make user limit large so we can activate all applications
+ qb.setUserLimitFactor((float)100.0);
+ qb.setupConfigurableCapacities();
+
+ SchedulerEvent addAppEvent;
+ SchedulerEvent addAttemptEvent;
+ Container container = mock(Container.class);
+ ApplicationSubmissionContext submissionContext =
+ mock(ApplicationSubmissionContext.class);
+
+ final int appCount = 100;
+ ApplicationId[] appids = new ApplicationId[appCount];
+ RMAppAttemptImpl[] attempts = new RMAppAttemptImpl[appCount];
+ ApplicationAttemptId[] appAttemptIds = new ApplicationAttemptId[appCount];
+ RMAppImpl[] apps = new RMAppImpl[appCount];
+ RMAppAttemptMetrics[] attemptMetrics = new RMAppAttemptMetrics[appCount];
+ for (int i=0; i<appCount; i++) {
+ appids[i] = BuilderUtils.newApplicationId(100, i);
+ appAttemptIds[i] =
+ BuilderUtils.newApplicationAttemptId(appids[i], 1);
+
+ attemptMetrics[i] =
+ new RMAppAttemptMetrics(appAttemptIds[i], rm.getRMContext());
+ apps[i] = mock(RMAppImpl.class);
+ when(apps[i].getApplicationId()).thenReturn(appids[i]);
+ attempts[i] = mock(RMAppAttemptImpl.class);
+ when(attempts[i].getMasterContainer()).thenReturn(container);
+ when(attempts[i].getSubmissionContext()).thenReturn(submissionContext);
+ when(attempts[i].getAppAttemptId()).thenReturn(appAttemptIds[i]);
+ when(attempts[i].getRMAppAttemptMetrics()).thenReturn(attemptMetrics[i]);
+ when(apps[i].getCurrentAppAttempt()).thenReturn(attempts[i]);
+
+ rm.getRMContext().getRMApps().put(appids[i], apps[i]);
+ addAppEvent =
+ new AppAddedSchedulerEvent(appids[i], "default", "user1");
+ cs.handle(addAppEvent);
+ addAttemptEvent =
+ new AppAttemptAddedSchedulerEvent(appAttemptIds[i], false);
+ cs.handle(addAttemptEvent);
+ }
+
+ // add nodes to cluster, so cluster has 20GB and 20 vcores
+ Resource newResource = Resource.newInstance(10 * GB, 10);
+ RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
+ cs.handle(new NodeAddedSchedulerEvent(node));
+
+ Resource newResource2 = Resource.newInstance(10 * GB, 10);
+ RMNode node2 = MockNodes.newNodeInfo(0, newResource2, 1, "127.0.0.2");
+ cs.handle(new NodeAddedSchedulerEvent(node2));
+
+ Priority u0Priority = TestUtils.createMockPriority(1);
+ RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ FiCaSchedulerApp[] fiCaApps = new FiCaSchedulerApp[appCount];
+ for (int i=0;i<appCount;i++) {
+ fiCaApps[i] =
+ cs.getSchedulerApplications().get(apps[i].getApplicationId())
+ .getCurrentAppAttempt();
+ // allocate container for app2 with 1GB memory and 1 vcore
+ fiCaApps[i].updateResourceRequests(Collections.singletonList(
+ TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, true,
+ u0Priority, recordFactory)));
+ }
+ // Now force everything to be over user limit
+ qb.setUserLimitFactor((float)0.0);
+
+ // Quiet the loggers while measuring throughput
+ for (Enumeration<?> loggers=LogManager.getCurrentLoggers();
+ loggers.hasMoreElements(); ) {
+ Logger logger = (Logger) loggers.nextElement();
+ logger.setLevel(Level.WARN);
+ }
+ final int topn = 20;
+ final int iterations = 2000000;
+ final int printInterval = 20000;
+ final float numerator = 1000.0f * printInterval;
+ PriorityQueue<Long> queue = new PriorityQueue<>(topn,
+ Collections.reverseOrder());
+
+ long n = Time.monotonicNow();
+ long timespent = 0;
+ for (int i = 0; i < iterations; i+=2) {
+ if (i > 0 && i % printInterval == 0){
+ long ts = (Time.monotonicNow() - n);
+ if (queue.size() < topn) {
+ queue.offer(ts);
+ } else {
+ Long last = queue.peek();
+ if (last > ts) {
+ queue.poll();
+ queue.offer(ts);
+ }
+ }
+ System.out.println(i + " " + (numerator / ts));
+ n= Time.monotonicNow();
+ }
+ cs.handle(new NodeUpdateSchedulerEvent(node));
+ cs.handle(new NodeUpdateSchedulerEvent(node2));
+ }
+ timespent=0;
+ int entries = queue.size();
+ while(queue.size() > 0){
+ long l = queue.poll();
+ timespent += l;
+ }
+ System.out.println("Avg of fastest " + entries + ": "
+ + numerator / (timespent / entries));
+ rm.stop();
+ }
+
@Test
public void testCSQueueBlocked() throws Exception {
CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
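The benchmark above keeps only the fastest top-N interval timings in a reverse-ordered PriorityQueue (a max-heap), so warm-up and GC outliers do not skew the reported average. A standalone sketch of that pattern with made-up sample data:

import java.util.Collections;
import java.util.PriorityQueue;

class FastestIntervals {
  public static void main(String[] args) {
    final int topn = 20;
    // Max-heap: peek() returns the slowest of the kept timings.
    PriorityQueue<Long> fastest =
        new PriorityQueue<>(topn, Collections.reverseOrder());
    long[] samples = {120, 95, 300, 88, 110, 97, 250, 90};
    for (long ts : samples) {
      if (fastest.size() < topn) {
        fastest.offer(ts);
      } else if (fastest.peek() > ts) {
        fastest.poll();    // drop the slowest kept sample
        fastest.offer(ts); // keep the faster one
      }
    }
    long sum = 0;
    int n = fastest.size();
    while (!fastest.isEmpty()) {
      sum += fastest.poll();
    }
    System.out.println("Avg of fastest " + n + ": " + (sum / (double) n));
  }
}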
http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 4417132..2864d7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -1146,7 +1146,7 @@ public class TestLeafQueue {
new ResourceLimits(clusterResource),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
qb.computeUserLimitAndSetHeadroom(app_0, clusterResource,
- "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+ "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, null);
//maxqueue 16G, userlimit 13G, - 4G used = 9G
assertEquals(9*GB,app_0.getHeadroom().getMemorySize());
@@ -1169,7 +1169,7 @@ public class TestLeafQueue {
new ResourceLimits(clusterResource),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
qb.computeUserLimitAndSetHeadroom(app_0, clusterResource,
- "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+ "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, null);
assertEquals(8*GB, qb.getUsedResources().getMemorySize());
assertEquals(4*GB, app_0.getCurrentConsumption().getMemorySize());
@@ -1219,7 +1219,7 @@ public class TestLeafQueue {
new ResourceLimits(clusterResource),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
qb.computeUserLimitAndSetHeadroom(app_3, clusterResource,
- "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+ "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, null);
assertEquals(4*GB, qb.getUsedResources().getMemorySize());
//maxqueue 16G, userlimit 7G, used (by each user) 2G, headroom 5G (both)
assertEquals(5*GB, app_3.getHeadroom().getMemorySize());
@@ -1240,9 +1240,9 @@ public class TestLeafQueue {
new ResourceLimits(clusterResource),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
qb.computeUserLimitAndSetHeadroom(app_4, clusterResource,
- "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+ "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, null);
qb.computeUserLimitAndSetHeadroom(app_3, clusterResource,
- "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+ "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY, null);
//app3 is user1, active from last test case
[03/50] [abbrv] hadoop git commit: Merge branch 'HDFS-7240' of
https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240
Posted by xy...@apache.org.
Merge branch 'HDFS-7240' of https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a4246c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a4246c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a4246c8
Branch: refs/heads/HDFS-7240
Commit: 9a4246c80a40ccc1f53036bed2c7402644c9da65
Parents: ef9ba83 87154fc
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jul 12 17:19:05 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 12 17:19:05 2017 -0700
----------------------------------------------------------------------
----------------------------------------------------------------------
[12/50] [abbrv] hadoop git commit: YARN-6792. Incorrect XML
conversion in NodeIDsInfo and LabelsToNodesInfo. Contributed by Giovanni
Matteo Fumarola.
Posted by xy...@apache.org.
YARN-6792. Incorrect XML conversion in NodeIDsInfo and LabelsToNodesInfo. Contributed by Giovanni Matteo Fumarola.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/228ddaa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/228ddaa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/228ddaa3
Branch: refs/heads/HDFS-7240
Commit: 228ddaa31d812533b862576445494bc2cd8a2884
Parents: 43f0503
Author: Sunil G <su...@apache.org>
Authored: Fri Jul 14 08:07:05 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Jul 14 08:07:05 2017 +0530
----------------------------------------------------------------------
.../hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java | 5 ++++-
.../server/resourcemanager/webapp/dao/LabelsToNodesInfo.java | 6 +++++-
2 files changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/228ddaa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
index c23b02a..5f45b96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -26,7 +26,10 @@ import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
-@XmlRootElement(name = "labelsToNodesInfo")
+/**
+ * XML element used to represent a list of NodeIds.
+ */
+@XmlRootElement(name = "nodeIDsInfo")
@XmlAccessorType(XmlAccessType.FIELD)
public class NodeIDsInfo {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/228ddaa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
index 41dd410..e842d42 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
@@ -37,7 +37,11 @@ public class LabelsToNodesInfo {
public LabelsToNodesInfo() {
} // JAXB needs this
+ public LabelsToNodesInfo(Map<NodeLabelInfo, NodeIDsInfo> labelsToNodes) {
+ this.labelsToNodes = labelsToNodes;
+ }
+
public Map<NodeLabelInfo, NodeIDsInfo> getLabelsToNodes() {
- return labelsToNodes;
+ return labelsToNodes;
}
}
\ No newline at end of file
[44/50] [abbrv] hadoop git commit: HDFS-12133. Correct
ContentSummaryComputationContext Logger class name. Contributed by Surendra
Singh Lilhore.
Posted by xy...@apache.org.
HDFS-12133. Correct ContentSummaryComputationContext Logger class name. Contributed by Surendra Singh Lilhore.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04ff412d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04ff412d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04ff412d
Branch: refs/heads/HDFS-7240
Commit: 04ff412dabf3f6b9d884171c4140adbc636d5387
Parents: f8cd55f
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Jul 19 23:43:10 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Jul 19 23:43:10 2017 +0800
----------------------------------------------------------------------
.../hdfs/server/namenode/ContentSummaryComputationContext.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/04ff412d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 43e6f0d..c81f82c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -48,7 +48,8 @@ public class ContentSummaryComputationContext {
private int sleepNanoSec = 0;
public static final String REPLICATED = "Replicated";
- public static final Log LOG = LogFactory.getLog(INode.class);
+ public static final Log LOG = LogFactory
+ .getLog(ContentSummaryComputationContext.class);
private FSPermissionChecker pc;
/**
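The corrected line is the standard per-class logger idiom; a minimal standalone sketch, with a hypothetical class name:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class MyComputation {
      // Pass the declaring class itself so log output is attributed to
      // MyComputation, not to an unrelated class such as INode.
      public static final Log LOG = LogFactory.getLog(MyComputation.class);
    }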
[25/50] [abbrv] hadoop git commit: HDFS-11786. Add support to make
copyFromLocal multi-threaded. Contributed by Mukul Kumar Singh.
HDFS-11786. Add support to make copyFromLocal multi-threaded. Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02b141ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02b141ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02b141ac
Branch: refs/heads/HDFS-7240
Commit: 02b141ac6059323ec43e472ca36dc570fdca386f
Parents: b778887
Author: Anu Engineer <ae...@apache.org>
Authored: Sun Jul 16 10:59:34 2017 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sun Jul 16 10:59:34 2017 -0700
----------------------------------------------------------------------
.../apache/hadoop/fs/shell/CopyCommands.java | 112 +++++++++++-
.../apache/hadoop/fs/shell/MoveCommands.java | 4 +-
.../hadoop/fs/shell/TestCopyFromLocal.java | 173 +++++++++++++++++++
.../hadoop/fs/shell/TestCopyPreserveFlag.java | 19 ++
.../src/test/resources/testConf.xml | 44 ++++-
5 files changed, 346 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/02b141ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index e2fad75..7b3c53e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -26,7 +26,11 @@ import java.net.URISyntaxException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -288,9 +292,113 @@ class CopyCommands {
}
public static class CopyFromLocal extends Put {
+ private ThreadPoolExecutor executor = null;
+ private int numThreads = 1;
+
+ private static final int MAX_THREADS =
+ Runtime.getRuntime().availableProcessors() * 2;
public static final String NAME = "copyFromLocal";
- public static final String USAGE = Put.USAGE;
- public static final String DESCRIPTION = "Identical to the -put command.";
+ public static final String USAGE =
+ "[-f] [-p] [-l] [-d] [-t <thread count>] <localsrc> ... <dst>";
+ public static final String DESCRIPTION =
+ "Copy files from the local file system " +
+ "into fs. Copying fails if the file already " +
+ "exists, unless the -f flag is given.\n" +
+ "Flags:\n" +
+ " -p : Preserves access and modification times, ownership and the" +
+ " mode.\n" +
+ " -f : Overwrites the destination if it already exists.\n" +
+ " -t <thread count> : Number of threads to be used, default is 1.\n" +
+ " -l : Allow DataNode to lazily persist the file to disk. Forces" +
+ " replication factor of 1. This flag will result in reduced" +
+ " durability. Use with care.\n" +
+ " -d : Skip creation of temporary file(<dst>._COPYING_).\n";
+
+ private void setNumberThreads(String numberThreadsString) {
+ if (numberThreadsString == null) {
+ numThreads = 1;
+ } else {
+ int parsedValue = Integer.parseInt(numberThreadsString);
+ if (parsedValue <= 1) {
+ numThreads = 1;
+ } else if (parsedValue > MAX_THREADS) {
+ numThreads = MAX_THREADS;
+ } else {
+ numThreads = parsedValue;
+ }
+ }
+ }
+
+ @Override
+ protected void processOptions(LinkedList<String> args) throws IOException {
+ CommandFormat cf =
+ new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
+ cf.addOptionWithValue("t");
+ cf.parse(args);
+ setNumberThreads(cf.getOptValue("t"));
+ setOverwrite(cf.getOpt("f"));
+ setPreserve(cf.getOpt("p"));
+ setLazyPersist(cf.getOpt("l"));
+ setDirectWrite(cf.getOpt("d"));
+ getRemoteDestination(args);
+ // should have a -r option
+ setRecursive(true);
+ }
+
+ private void copyFile(PathData src, PathData target) throws IOException {
+ if (isPathRecursable(src)) {
+ throw new PathIsDirectoryException(src.toString());
+ }
+ super.copyFileToTarget(src, target);
+ }
+
+ @Override
+ protected void copyFileToTarget(PathData src, PathData target)
+ throws IOException {
+ // if the number of threads is 1, mimic put and avoid threading overhead
+ if (numThreads == 1) {
+ copyFile(src, target);
+ return;
+ }
+
+ Runnable task = () -> {
+ try {
+ copyFile(src, target);
+ } catch (IOException e) {
+ displayError(e);
+ }
+ };
+ executor.submit(task);
+ }
+
+ @Override
+ protected void processArguments(LinkedList<PathData> args)
+ throws IOException {
+ executor = new ThreadPoolExecutor(numThreads, numThreads, 1,
+ TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
+ new ThreadPoolExecutor.CallerRunsPolicy());
+ super.processArguments(args);
+
+ // issue the command and then wait for it to finish
+ executor.shutdown();
+ try {
+ executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MINUTES);
+ } catch (InterruptedException e) {
+ executor.shutdownNow();
+ displayError(e);
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ @VisibleForTesting
+ public int getNumThreads() {
+ return numThreads;
+ }
+
+ @VisibleForTesting
+ public ThreadPoolExecutor getExecutor() {
+ return executor;
+ }
}
public static class CopyToLocal extends Get {
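A hedged sketch of exercising the new flag programmatically; the paths and thread count are examples, and the entry points assumed here are the stock FsShell and ToolRunner:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class MultiThreadedCopyExample {
      public static void main(String[] args) throws Exception {
        // -t 4 requests four copy threads; setNumberThreads() clamps the
        // value to the range [1, 2 * availableProcessors].
        int rc = ToolRunner.run(new Configuration(), new FsShell(),
            new String[] {"-copyFromLocal", "-t", "4",
                "/local/src", "/user/example/dst"});
        System.exit(rc);
      }
    }

The equivalent shell invocation is hadoop fs -copyFromLocal -t 4 /local/src /user/example/dst.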
http://git-wip-us.apache.org/repos/asf/hadoop/blob/02b141ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index d359282..5ef4277 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathExistsException;
-import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal;
+import org.apache.hadoop.fs.shell.CopyCommands.Put;
/** Various commands for moving files */
@InterfaceAudience.Private
@@ -41,7 +41,7 @@ class MoveCommands {
/**
* Move local files to a remote filesystem
*/
- public static class MoveFromLocal extends CopyFromLocal {
+ public static class MoveFromLocal extends Put {
public static final String NAME = "moveFromLocal";
public static final String USAGE = "<localsrc> ... <dst>";
public static final String DESCRIPTION =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/02b141ac/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
new file mode 100644
index 0000000..8d354b4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang.math.RandomUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test for copyFromLocal.
+ */
+public class TestCopyFromLocal {
+ private static final String FROM_DIR_NAME = "fromDir";
+ private static final String TO_DIR_NAME = "toDir";
+
+ private static FileSystem fs;
+ private static Path testDir;
+ private static Configuration conf;
+
+ public static int initialize(Path dir) throws Exception {
+ fs.mkdirs(dir);
+ Path fromDirPath = new Path(dir, FROM_DIR_NAME);
+ fs.mkdirs(fromDirPath);
+ Path toDirPath = new Path(dir, TO_DIR_NAME);
+ fs.mkdirs(toDirPath);
+
+ int numTotalFiles = 0;
+ int numDirs = RandomUtils.nextInt(5);
+ for (int dirCount = 0; dirCount < numDirs; ++dirCount) {
+ Path subDirPath = new Path(fromDirPath, "subdir" + dirCount);
+ fs.mkdirs(subDirPath);
+ int numFiles = RandomUtils.nextInt(10);
+ for (int fileCount = 0; fileCount < numFiles; ++fileCount) {
+ numTotalFiles++;
+ Path subFile = new Path(subDirPath, "file" + fileCount);
+ fs.createNewFile(subFile);
+ FSDataOutputStream output = fs.create(subFile, true);
+ for(int i = 0; i < 100; ++i) {
+ output.writeInt(i);
+ output.writeChar('\n');
+ }
+ output.close();
+ }
+ }
+
+ return numTotalFiles;
+ }
+
+ @BeforeClass
+ public static void init() throws Exception {
+ conf = new Configuration(false);
+ conf.set("fs.file.impl", LocalFileSystem.class.getName());
+ fs = FileSystem.getLocal(conf);
+ testDir = new FileSystemTestHelper().getTestRootPath(fs);
+ // don't want scheme on the path, just an absolute path
+ testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
+
+ FileSystem.setDefaultUri(conf, fs.getUri());
+ fs.setWorkingDirectory(testDir);
+ }
+
+ @AfterClass
+ public static void cleanup() throws Exception {
+ fs.delete(testDir, true);
+ fs.close();
+ }
+
+ private void run(CommandWithDestination cmd, String... args) {
+ cmd.setConf(conf);
+ assertEquals(0, cmd.run(args));
+ }
+
+ @Test(timeout = 10000)
+ public void testCopyFromLocal() throws Exception {
+ Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
+ TestCopyFromLocal.initialize(dir);
+ run(new TestMultiThreadedCopy(1, 0),
+ new Path(dir, FROM_DIR_NAME).toString(),
+ new Path(dir, TO_DIR_NAME).toString());
+ }
+
+ @Test(timeout = 10000)
+ public void testCopyFromLocalWithThreads() throws Exception {
+ Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
+ int numFiles = TestCopyFromLocal.initialize(dir);
+ int maxThreads = Runtime.getRuntime().availableProcessors() * 2;
+ int randThreads = RandomUtils.nextInt(maxThreads);
+ int numActualThreads = randThreads == 0 ? 1 : randThreads;
+ String numThreads = Integer.toString(numActualThreads);
+ run(new TestMultiThreadedCopy(numActualThreads, numFiles), "-t", numThreads,
+ new Path(dir, FROM_DIR_NAME).toString(),
+ new Path(dir, TO_DIR_NAME).toString());
+ }
+
+ @Test(timeout = 10000)
+ public void testCopyFromLocalWithThreadWrong() throws Exception {
+ Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
+ int numFiles = TestCopyFromLocal.initialize(dir);
+ int maxThreads = Runtime.getRuntime().availableProcessors() * 2;
+ String numThreads = Integer.toString(maxThreads * 2);
+ run(new TestMultiThreadedCopy(maxThreads, numFiles), "-t", numThreads,
+ new Path(dir, FROM_DIR_NAME).toString(),
+ new Path(dir, TO_DIR_NAME).toString());
+ }
+
+ @Test(timeout = 10000)
+ public void testCopyFromLocalWithZeroThreads() throws Exception {
+ Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
+ TestCopyFromLocal.initialize(dir);
+ run(new TestMultiThreadedCopy(1, 0), "-t", "0",
+ new Path(dir, FROM_DIR_NAME).toString(),
+ new Path(dir, TO_DIR_NAME).toString());
+ }
+
+ private class TestMultiThreadedCopy extends CopyFromLocal {
+ private int expectedThreads;
+ private int expectedCompletedTaskCount;
+
+ TestMultiThreadedCopy(int expectedThreads,
+ int expectedCompletedTaskCount) {
+ this.expectedThreads = expectedThreads;
+ this.expectedCompletedTaskCount = expectedCompletedTaskCount;
+ }
+
+ @Override
+ protected void processArguments(LinkedList<PathData> args)
+ throws IOException {
+ // Check if the correct number of threads are spawned
+ Assert.assertEquals(expectedThreads, getNumThreads());
+ super.processArguments(args);
+ // Once the copy is complete, check the following:
+ // 1) the number of completed tasks equals the expected count
+ // 2) there are no active tasks in the executor
+ // 3) the executor has shut down correctly
+ ThreadPoolExecutor executor = getExecutor();
+ Assert.assertEquals(executor.getCompletedTaskCount(),
+ expectedCompletedTaskCount);
+ Assert.assertEquals(executor.getActiveCount(), 0);
+ Assert.assertTrue(executor.isTerminated());
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/02b141ac/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
index 47dc601..8dd09e5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.shell.CopyCommands.Cp;
import org.apache.hadoop.fs.shell.CopyCommands.Get;
import org.apache.hadoop.fs.shell.CopyCommands.Put;
+import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -120,6 +121,24 @@ public class TestCopyPreserveFlag {
}
@Test(timeout = 10000)
+ public void testCopyFromLocal() throws Exception {
+ run(new CopyFromLocal(), FROM.toString(), TO.toString());
+ assertAttributesChanged(TO);
+ }
+
+ @Test(timeout = 10000)
+ public void testCopyFromLocalWithThreads() throws Exception {
+ run(new CopyFromLocal(), "-t", "10", FROM.toString(), TO.toString());
+ assertAttributesChanged(TO);
+ }
+
+ @Test(timeout = 10000)
+ public void testCopyFromLocalWithThreadsPreserve() throws Exception {
+ run(new CopyFromLocal(), "-p", "-t", "10", FROM.toString(), TO.toString());
+ assertAttributesPreserved(TO);
+ }
+
+ @Test(timeout = 10000)
public void testGetWithP() throws Exception {
run(new Get(), "-p", FROM.toString(), TO.toString());
assertAttributesPreserved(TO);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/02b141ac/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 342b17c..64677f8 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -547,11 +547,51 @@
<comparators>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^-copyFromLocal \[-f\] \[-p\] \[-l\] \[-d\] <localsrc> \.\.\. <dst> :\s*</expected-output>
+ <expected-output>^-copyFromLocal \[-f\] \[-p\] \[-l\] \[-d\] \[-t <thread count>\] <localsrc> \.\.\. <dst> :\s*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^\s*Identical to the -put command\.\s*</expected-output>
+ <expected-output>^\s*Copy files from the local file system into fs.( )*Copying fails if the file already( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*exists, unless the -f flag is given.( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*Flags:( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*-p Preserves access and modification times, ownership and the( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*mode.( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*-f Overwrites the destination if it already exists.( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*-t <thread count> Number of threads to be used, default is 1.( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*-l Allow DataNode to lazily persist the file to disk. Forces( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*replication factor of 1. This flag will result in reduced( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*durability. Use with care.( )*</expected-output>
+ </comparator>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>^\s*-d Skip creation of temporary file\(<dst>\._COPYING_\).( )*</expected-output>
</comparator>
</comparators>
</test>
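The thread-count handling these test cases pin down reduces to a small clamping rule; the following is a standalone restatement of setNumberThreads(), not code from the commit:

    static int clampThreads(String optionValue, int maxThreads) {
      if (optionValue == null) {
        return 1;                          // -t absent: behave like -put
      }
      int parsed = Integer.parseInt(optionValue);
      if (parsed <= 1) {
        return 1;                          // zero or negative falls back to 1
      }
      return Math.min(parsed, maxThreads); // cap at 2 * availableProcessors
    }

This is why testCopyFromLocalWithThreadWrong expects maxThreads even though it passes maxThreads * 2, and testCopyFromLocalWithZeroThreads expects 1.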
[36/50] [abbrv] hadoop git commit: Addendum patch for YARN-5731
Addendum patch for YARN-5731
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b7afc06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b7afc06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b7afc06
Branch: refs/heads/HDFS-7240
Commit: 0b7afc060c2024a882bd1934d0f722bfca731742
Parents: ccaf036
Author: Sunil G <su...@apache.org>
Authored: Tue Jul 18 11:49:09 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Jul 18 11:49:09 2017 +0530
----------------------------------------------------------------------
.../ProportionalCapacityPreemptionPolicy.java | 25 ++++--------------
.../CapacitySchedulerConfiguration.java | 27 ++++++++++++++++++++
...TestCapacitySchedulerSurgicalPreemption.java | 6 +++--
3 files changed, 36 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b7afc06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 719d2eb..fc8ad2b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -231,28 +231,13 @@ public class ProportionalCapacityPreemptionPolicy
.add(new ReservedContainerCandidatesSelector(this));
}
+ boolean additionalPreemptionBasedOnReservedResource = csConfig.getBoolean(
+ CapacitySchedulerConfiguration.ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS,
+ CapacitySchedulerConfiguration.DEFAULT_ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS);
+
// initialize candidates preemption selection policies
- // When select candidates for reserved containers is enabled, exclude reserved
- // resource in fifo policy (less aggressive). Otherwise include reserved
- // resource.
- //
- // Why doing this? In YARN-4390, we added preemption-based-on-reserved-container
- // Support. To reduce unnecessary preemption for large containers. We will
- // not include reserved resources while calculating ideal-allocation in
- // FifoCandidatesSelector.
- //
- // Changes in YARN-4390 will significantly reduce number of containers preempted
- // When cluster has heterogeneous container requests. (Please check test
- // report: https://issues.apache.org/jira/secure/attachment/12796197/YARN-4390-test-results.pdf
- //
- // However, on the other hand, in some corner cases, especially for
- // fragmented cluster. It could lead to preemption cannot kick in in some
- // cases. Please see YARN-5731.
- //
- // So to solve the problem, we will include reserved when surgical preemption
- // for reserved container, which reverts behavior when YARN-4390 is disabled.
candidatesSelectionPolicies.add(new FifoCandidatesSelector(this,
- !selectCandidatesForResevedContainers));
+ additionalPreemptionBasedOnReservedResource));
// Do we need to specially consider intra queue
boolean isIntraQueuePreemptionEnabled = csConfig.getBoolean(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b7afc06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 90a7e65..1e29d50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -1206,6 +1206,33 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
0.2f;
/**
+ * By default, reserved resources are excluded while balancing capacities
+ * of queues.
+ *
+ * Why do this? In YARN-4390, we added preemption-based-on-reserved-container
+ * support to reduce unnecessary preemption for large containers: we do
+ * not include reserved resources while calculating the ideal allocation in
+ * FifoCandidatesSelector.
+ *
+ * The changes in YARN-4390 significantly reduce the number of containers
+ * preempted when the cluster has heterogeneous container requests. (Please
+ * check the test report:
+ * https://issues.apache.org/jira/secure/attachment/12796197/YARN-4390-test-results.pdf)
+ *
+ * However, in some corner cases, especially on a fragmented cluster, this
+ * can prevent preemption from kicking in at all. Please see YARN-5731.
+ *
+ * So to solve the problem, this behavior is made configurable; please
+ * note that it is an experimental option.
+ */
+ public static final String
+ ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS =
+ PREEMPTION_CONFIG_PREFIX
+ + "additional_res_balance_based_on_reserved_containers";
+ public static final boolean
+ DEFAULT_ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS = false;
+
+ /**
* When calculating which containers to be preempted, we will try to preempt
* containers for reserved containers first. By default is false.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b7afc06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
index afd2f82..9146373 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
@@ -813,9 +813,11 @@ public class TestCapacitySchedulerSurgicalPreemption
@Test(timeout = 60000)
public void testPreemptionForFragmentatedCluster() throws Exception {
+ // Set additional_res_balance_based_on_reserved_containers to true to get
+ // additional preemptions.
conf.setBoolean(
- CapacitySchedulerConfiguration.PREEMPTION_SELECT_CANDIDATES_FOR_RESERVED_CONTAINERS,
- false);
+ CapacitySchedulerConfiguration.ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS,
+ true);
/**
* Two queues, a/b, each of them are 50/50
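Enabling the new experimental option from client or test code is a one-liner; this sketch uses only the constant introduced above:

    Configuration conf = new Configuration();
    // Default is false. Opting in includes reserved resources when
    // balancing queue capacities, restoring the pre-YARN-4390 behavior.
    conf.setBoolean(
        CapacitySchedulerConfiguration
            .ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS,
        true);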
[22/50] [abbrv] hadoop git commit: HDFS-12112.
TestBlockManager#testBlockManagerMachinesArray sometimes fails with NPE.
Contributed by Wei-Chiu Chuang.
HDFS-12112. TestBlockManager#testBlockManagerMachinesArray sometimes fails with NPE. Contributed by Wei-Chiu Chuang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b778887a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b778887a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b778887a
Branch: refs/heads/HDFS-7240
Commit: b778887af59d96f1fac30cae14be1cabbdb74c8b
Parents: 06ece48
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Sat Jul 15 10:38:31 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Sat Jul 15 10:38:31 2017 +0800
----------------------------------------------------------------------
.../hadoop/hdfs/server/blockmanagement/TestBlockManager.java | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b778887a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 3088b7b..6b1a979 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1219,7 +1219,7 @@ public class TestBlockManager {
}
}
- @Test
+ @Test(timeout = 60000)
public void testBlockManagerMachinesArray() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster =
@@ -1230,6 +1230,8 @@ public class TestBlockManager {
final Path filePath = new Path("/tmp.txt");
final long fileLen = 1L;
DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
+ DFSTestUtil.waitForReplication((DistributedFileSystem)fs,
+ filePath, (short) 3, 60000);
ArrayList<DataNode> datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 4);
FSNamesystem ns = cluster.getNamesystem();
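The fix is the usual wait-before-assert pattern for replication-dependent tests; in outline, mirroring the two lines added above:

    // Without the wait, the test can read a block's datanode array while
    // replication is still in flight and trip over a null entry (the NPE).
    DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
    DFSTestUtil.waitForReplication((DistributedFileSystem) fs, filePath,
        (short) 3, 60000);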
[19/50] [abbrv] hadoop git commit: YARN-6625. yarn application -list
returns a tracking URL for AM that doesn't work in a secured and HA
environment. (Yufei Gu)
YARN-6625. yarn application -list returns a tracking URL for AM that doesn't work in a secured and HA environment. (Yufei Gu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e0cde14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e0cde14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e0cde14
Branch: refs/heads/HDFS-7240
Commit: 9e0cde1469b8ffeb59619c64d6ece86b62424f04
Parents: e7d187a
Author: Yufei Gu <yu...@apache.org>
Authored: Fri Jul 14 14:10:45 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Fri Jul 14 14:10:45 2017 -0700
----------------------------------------------------------------------
.../server/webproxy/amfilter/AmIpFilter.java | 60 +++++++++++++----
.../server/webproxy/amfilter/TestAmFilter.java | 70 +++++++++++++++++++-
2 files changed, 114 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e0cde14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index 6579191..cdab405 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -23,10 +23,12 @@ import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.UnknownHostException;
+import java.net.HttpURLConnection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.Collection;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
@@ -38,12 +40,12 @@ import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.webproxy.ProxyUtils;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
-import org.apache.hadoop.yarn.util.RMHAUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -66,7 +68,8 @@ public class AmIpFilter implements Filter {
private String[] proxyHosts;
private Set<String> proxyAddresses = null;
private long lastUpdate;
- private Map<String, String> proxyUriBases;
+ @VisibleForTesting
+ Map<String, String> proxyUriBases;
@Override
public void init(FilterConfig conf) throws ServletException {
@@ -187,24 +190,55 @@ public class AmIpFilter implements Filter {
}
}
- protected String findRedirectUrl() throws ServletException {
- String addr;
- if (proxyUriBases.size() == 1) { // external proxy or not RM HA
+ @VisibleForTesting
+ public String findRedirectUrl() throws ServletException {
+ String addr = null;
+ if (proxyUriBases.size() == 1) {
+ // external proxy or not RM HA
addr = proxyUriBases.values().iterator().next();
- } else { // RM HA
+ } else {
+ // RM HA
YarnConfiguration conf = new YarnConfiguration();
- String activeRMId = RMHAUtils.findActiveRMHAId(conf);
- String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
- ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
- : YarnConfiguration.RM_WEBAPP_ADDRESS;
- String host = conf.get(
- HAUtil.addSuffix(addressPropertyPrefix, activeRMId));
- addr = proxyUriBases.get(host);
+ for (String rmId : getRmIds(conf)) {
+ String url = getUrlByRmId(conf, rmId);
+ if (isValidUrl(url)) {
+ addr = url;
+ break;
+ }
+ }
}
+
if (addr == null) {
throw new ServletException(
"Could not determine the proxy server for redirection");
}
return addr;
}
+
+ @VisibleForTesting
+ Collection<String> getRmIds(YarnConfiguration conf) {
+ return conf.getStringCollection(YarnConfiguration.RM_HA_IDS);
+ }
+
+ @VisibleForTesting
+ String getUrlByRmId(YarnConfiguration conf, String rmId) {
+ String addressPropertyPrefix = YarnConfiguration.useHttps(conf) ?
+ YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS :
+ YarnConfiguration.RM_WEBAPP_ADDRESS;
+ String host = conf.get(HAUtil.addSuffix(addressPropertyPrefix, rmId));
+ return proxyUriBases.get(host);
+ }
+
+ private boolean isValidUrl(String url) {
+ boolean isValid = false;
+ try {
+ HttpURLConnection conn =
+ (HttpURLConnection) new URL(url).openConnection();
+ conn.connect();
+ isValid = conn.getResponseCode() == HttpURLConnection.HTTP_OK;
+ } catch (Exception e) {
+ LOG.debug("Failed to connect to " + url + ": " + e.toString());
+ }
+ return isValid;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e0cde14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
index b788f5d..687faea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
@@ -22,18 +22,41 @@ import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.HttpURLConnection;
-import java.util.*;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Enumeration;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.ArrayList;
+import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
-import javax.servlet.*;
+import javax.servlet.FilterConfig;
+import javax.servlet.FilterChain;
+import javax.servlet.Filter;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletResponse;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletException;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import org.apache.hadoop.http.TestHttpServer;
import org.apache.hadoop.yarn.server.webproxy.ProxyUtils;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.glassfish.grizzly.servlet.HttpServletResponseImpl;
import org.junit.Test;
import org.mockito.Mockito;
@@ -121,6 +144,47 @@ public class TestAmFilter {
filter.destroy();
}
+ @Test
+ public void testFindRedirectUrl() throws Exception {
+ final String rm1 = "rm1";
+ final String rm2 = "rm2";
+ // generate a valid URL
+ final String rm1Url = startHttpServer();
+ // invalid url
+ final String rm2Url = "host2:8088";
+
+ TestAmIpFilter filter = new TestAmIpFilter();
+ TestAmIpFilter spy = Mockito.spy(filter);
+ // make sure findRedirectUrl() goes to the HA branch
+ spy.proxyUriBases = new HashMap<>();
+ spy.proxyUriBases.put(rm1, rm1Url);
+ spy.proxyUriBases.put(rm2, rm2Url);
+
+ Collection<String> rmIds = new ArrayList<>(Arrays.asList(rm1, rm2));
+ Mockito.doReturn(rmIds).when(spy).getRmIds(Mockito.any());
+ Mockito.doReturn(rm1Url).when(spy)
+ .getUrlByRmId(Mockito.any(), Mockito.eq(rm2));
+ Mockito.doReturn(rm2Url).when(spy)
+ .getUrlByRmId(Mockito.any(), Mockito.eq(rm1));
+
+ assertEquals(spy.findRedirectUrl(), rm1Url);
+ }
+
+ private String startHttpServer() throws Exception {
+ Server server = new Server(0);
+ ((QueuedThreadPool)server.getThreadPool()).setMaxThreads(10);
+ ServletContextHandler context = new ServletContextHandler();
+ context.setContextPath("/foo");
+ server.setHandler(context);
+ String servletPath = "/bar";
+ context.addServlet(new ServletHolder(TestHttpServer.EchoServlet.class),
+ servletPath);
+ ((ServerConnector)server.getConnectors()[0]).setHost("localhost");
+ server.start();
+ System.setProperty("sun.net.http.allowRestrictedHeaders", "true");
+ return server.getURI().toString() + servletPath;
+ }
+
/**
* Test AmIpFilter
*/
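The redirect strategy this test pins down, iterating the configured RM IDs and taking the first webapp URL that answers, can be sketched independently of the filter; the method name is hypothetical:

    // Standalone sketch of the probe-first-healthy-RM logic.
    static String pickReachableUrl(java.util.Collection<String> urls) {
      for (String url : urls) {
        try {
          java.net.HttpURLConnection conn = (java.net.HttpURLConnection)
              new java.net.URL(url).openConnection();
          conn.connect();
          if (conn.getResponseCode() == java.net.HttpURLConnection.HTTP_OK) {
            return url; // the first RM returning 200 wins
          }
        } catch (Exception e) {
          // unreachable, malformed, or non-HTTP: try the next RM
        }
      }
      return null; // caller throws ServletException, as findRedirectUrl() does
    }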
[16/50] [abbrv] hadoop git commit: HDFS-12130. Optimizing permission
check for getContentSummary.
HDFS-12130. Optimizing permission check for getContentSummary.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a29fe100
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a29fe100
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a29fe100
Branch: refs/heads/HDFS-7240
Commit: a29fe100b3c671954b759add5923a2b44af9e6a4
Parents: a5ae5ac
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Fri Jul 14 11:53:00 2017 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Fri Jul 14 13:36:27 2017 -0700
----------------------------------------------------------------------
.../server/blockmanagement/BlockCollection.java | 4 +-
.../ContentSummaryComputationContext.java | 20 ++
.../namenode/DirectoryWithQuotaFeature.java | 4 +-
.../server/namenode/FSDirStatAndListingOp.java | 9 +-
.../server/namenode/FSPermissionChecker.java | 32 +++
.../hadoop/hdfs/server/namenode/INode.java | 9 +-
.../hdfs/server/namenode/INodeDirectory.java | 9 +-
.../hdfs/server/namenode/INodeReference.java | 3 +-
.../snapshot/DirectorySnapshottableFeature.java | 3 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 3 +-
.../hdfs/server/namenode/snapshot/Snapshot.java | 4 +-
.../TestGetContentSummaryWithPermission.java | 201 +++++++++++++++++++
12 files changed, 285 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 2f214be..b880590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.security.AccessControlException;
/**
* This interface is used by the block manager to expose a
@@ -36,7 +37,8 @@ public interface BlockCollection {
/**
* Get content summary.
*/
- public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
+ public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+ throws AccessControlException;
/**
* @return the number of blocks or block groups
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 8d5aa0d..43e6f0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.AccessControlException;
+
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
@@ -46,6 +49,8 @@ public class ContentSummaryComputationContext {
public static final String REPLICATED = "Replicated";
public static final Log LOG = LogFactory.getLog(INode.class);
+
+ private FSPermissionChecker pc;
/**
* Constructor
*
@@ -57,6 +62,12 @@ public class ContentSummaryComputationContext {
*/
public ContentSummaryComputationContext(FSDirectory dir,
FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
+ this(dir, fsn, limitPerRun, sleepMicroSec, null);
+ }
+
+ public ContentSummaryComputationContext(FSDirectory dir,
+ FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
+ FSPermissionChecker pc) {
this.dir = dir;
this.fsn = fsn;
this.limitPerRun = limitPerRun;
@@ -65,6 +76,7 @@ public class ContentSummaryComputationContext {
this.snapshotCounts = new ContentCounts.Builder().build();
this.sleepMilliSec = sleepMicroSec/1000;
this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
+ this.pc = pc;
}
/** Constructor for blocking computation. */
@@ -186,4 +198,12 @@ public class ContentSummaryComputationContext {
}
return "";
}
+
+ void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
+ throws AccessControlException {
+ if (dir != null && dir.isPermissionEnabled()
+ && pc != null && !pc.isSuperUser()) {
+ pc.checkPermission(inode, snapshotId, access);
+ }
+ }
}
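Putting the pieces together: a context constructed with a non-null FSPermissionChecker makes each directory visit enforce READ_EXECUTE during the walk, rather than one up-front check on the root. A sketch assembled from the hunks in this commit, where fsd is the FSDirectory seen in FSDirStatAndListingOp below:

    ContentSummaryComputationContext cscc =
        new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
            fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
            fsd.getPermissionChecker());
    // Each INodeDirectory visited now calls
    // summary.checkPermission(dir, snapshotId, FsAction.READ_EXECUTE).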
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 31b45ad..0968c65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.security.AccessControlException;
/**
* Quota feature for {@link INodeDirectory}.
@@ -125,7 +126,8 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
}
ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
- final ContentSummaryComputationContext summary) {
+ final ContentSummaryComputationContext summary)
+ throws AccessControlException {
final long original = summary.getCounts().getStoragespace();
long oldYieldCount = summary.getYieldCount();
dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 04efa65..4c92249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -127,10 +127,8 @@ class FSDirStatAndListingOp {
FSDirectory fsd, String src) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
- if (fsd.isPermissionEnabled()) {
- fsd.checkPermission(pc, iip, false, null, null, null,
- FsAction.READ_EXECUTE);
- }
+ // getContentSummaryInt() call will check access (if enabled) when
+ // traversing all sub directories.
return getContentSummaryInt(fsd, iip);
}
@@ -513,7 +511,8 @@ class FSDirStatAndListingOp {
// processed. 0 means disabled. I.e. blocking for the entire duration.
ContentSummaryComputationContext cscc =
new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
- fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
+ fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
+ fsd.getPermissionChecker());
ContentSummary cs = targetNode.computeAndConvertContentSummary(
iip.getPathSnapshotId(), cscc);
fsd.addYieldCount(cscc.getYieldCount());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f1250dd..f745a6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -195,6 +195,38 @@ class FSPermissionChecker implements AccessControlEnforcer {
ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
}
+ /**
+ * Check permission only for the given inode (not checking the children's
+ * access).
+ *
+ * @param inode the inode to check.
+ * @param snapshotId the snapshot id.
+ * @param access the target access.
+ * @throws AccessControlException
+ */
+ void checkPermission(INode inode, int snapshotId, FsAction access)
+ throws AccessControlException {
+ try {
+ byte[][] localComponents = {inode.getLocalNameBytes()};
+ INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
+ AccessControlEnforcer enforcer = getAccessControlEnforcer();
+ enforcer.checkPermission(
+ fsOwner, supergroup, callerUgi,
+ iNodeAttr, // single inode attr in the array
+ new INode[]{inode}, // single inode in the array
+ localComponents, snapshotId,
+ null, -1, // this will skip checkTraverse() because
+ // not checking ancestor here
+ false, null, null,
+ access, // the target access to be checked against the inode
+ null, // passing null sub access avoids checking children
+ false);
+ } catch (AccessControlException ace) {
+ throw new AccessControlException(
+ toAccessControlString(inode, inode.getFullPathName(), access));
+ }
+ }
+
@Override
public void checkPermission(String fsOwner, String supergroup,
UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 1f982ca..d768e08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ChunkedArrayList;
import org.apache.hadoop.util.StringUtils;
@@ -418,7 +419,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
/** Compute {@link ContentSummary}. Blocking call */
- public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
+ public final ContentSummary computeContentSummary(
+ BlockStoragePolicySuite bsps) throws AccessControlException {
return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
new ContentSummaryComputationContext(bsps));
}
@@ -427,7 +429,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* Compute {@link ContentSummary}.
*/
public final ContentSummary computeAndConvertContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) {
+ ContentSummaryComputationContext summary) throws AccessControlException {
computeContentSummary(snapshotId, summary);
final ContentCounts counts = summary.getCounts();
final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -461,7 +463,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* @return The same objects as summary.
*/
public abstract ContentSummaryComputationContext computeContentSummary(
- int snapshotId, ContentSummaryComputationContext summary);
+ int snapshotId, ContentSummaryComputationContext summary)
+ throws AccessControlException;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 4012783..3b7fa4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,6 +26,7 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
@@ -632,7 +634,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
@Override
public ContentSummaryComputationContext computeContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) {
+ ContentSummaryComputationContext summary) throws AccessControlException {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
final ContentCounts counts = new ContentCounts.Builder().build();
@@ -654,7 +656,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
protected ContentSummaryComputationContext computeDirectoryContentSummary(
- ContentSummaryComputationContext summary, int snapshotId) {
+ ContentSummaryComputationContext summary, int snapshotId)
+ throws AccessControlException {
+ // throws AccessControlException if the permission check fails
+ summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
// Explicit traversing is done to enable repositioning after relinquishing
// and reacquiring locks.
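The checkPermission call added above is the core of the change: the summary computation now verifies READ_EXECUTE on every directory it descends into, rather than only on the path the caller supplied. A minimal, self-contained sketch of that pattern, using illustrative stand-in types rather than the real NameNode classes:

import java.io.IOException;
import java.util.List;

// Minimal sketch of a permission-checked recursive summary. DirNode and the
// boolean flag are illustrative stand-ins, not the real HDFS inode classes.
class DirNode {
    List<DirNode> children;
    long length;
    boolean callerHasReadExecute; // stand-in for the FSPermissionChecker logic

    long computeSummary() throws IOException {
        // fail before touching any child, mirroring summary.checkPermission()
        if (!callerHasReadExecute) {
            throw new IOException("Permission denied");
        }
        long total = length;
        if (children != null) {
            for (DirNode child : children) {
                total += child.computeSummary();
            }
        }
        return total;
    }
}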
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 1b85237..db2026d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
/**
* An anonymous reference to an inode.
@@ -314,7 +315,7 @@ public abstract class INodeReference extends INode {
@Override
public ContentSummaryComputationContext computeContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) {
+ ContentSummaryComputationContext summary) throws AccessControlException {
return referred.computeContentSummary(snapshotId, summary);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index fbfc278..0ab928d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
@@ -234,7 +235,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
@Override
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
- final ContentCounts counts) {
+ final ContentCounts counts) throws AccessControlException {
counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
super.computeContentSummary4Snapshot(bsps, counts);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0111b3b..7535879 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
@@ -630,7 +631,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
- final ContentCounts counts) {
+ final ContentCounts counts) throws AccessControlException {
// Create a new blank summary context for blocking processing of subtree.
ContentSummaryComputationContext summary =
new ContentSummaryComputationContext(bsps);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index e98e766..515f164 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
+import org.apache.hadoop.security.AccessControlException;
/** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private
@@ -176,7 +177,8 @@ public class Snapshot implements Comparable<byte[]> {
@Override
public ContentSummaryComputationContext computeContentSummary(
- int snapshotId, ContentSummaryComputationContext summary) {
+ int snapshotId, ContentSummaryComputationContext summary)
+ throws AccessControlException {
return computeDirectoryContentSummary(summary, snapshotId);
}
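Note how the signature change propagates: because computeContentSummary is abstract on INode and Hadoop's AccessControlException is a checked exception (it extends IOException), every override in the hierarchy — INodeDirectory, INodeReference, Snapshot's root, and the snapshot features — must redeclare it, which is why this patch touches all of these files in lockstep. A toy example of that ripple effect, with hypothetical names:

// Toy example (hypothetical names, not HDFS code) of why adding a checked
// exception to an abstract method forces a change in every override.
class AccessDenied extends Exception { } // stand-in for Hadoop's checked
                                         // AccessControlException

abstract class Node {
    abstract long summarize() throws AccessDenied;
}

class Leaf extends Node {
    @Override
    long summarize() throws AccessDenied { // must redeclare (or fully handle)
        return 1;
    }
}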
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
new file mode 100644
index 0000000..03aa440
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.security.PrivilegedExceptionAction;
+
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * This class tests get content summary with permission settings.
+ */
+public class TestGetContentSummaryWithPermission {
+ protected static final short REPLICATION = 3;
+ protected static final long BLOCKSIZE = 1024;
+
+ private Configuration conf;
+ private MiniDFSCluster cluster;
+ private DistributedFileSystem dfs;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+ cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
+ cluster.waitActive();
+
+ dfs = cluster.getFileSystem();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ /**
+ * Test getContentSummary for the super user. The super user is always
+ * allowed access, regardless of the permissions set on the directories.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testGetContentSummarySuperUser() throws Exception {
+ final Path foo = new Path("/fooSuper");
+ final Path bar = new Path(foo, "barSuper");
+ final Path baz = new Path(bar, "bazSuper");
+ dfs.mkdirs(bar);
+ DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+ ContentSummary summary;
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+
+ dfs.setPermission(foo, new FsPermission((short)0));
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+
+ dfs.setPermission(bar, new FsPermission((short)0));
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+
+ dfs.setPermission(baz, new FsPermission((short)0));
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+ }
+
+ /**
+ * Test getContentSummary for a non-superuser, non-owner user. Such users are
+ * restricted by the permissions of subdirectories: if any subdirectory does
+ * not grant READ_EXECUTE access, an AccessControlException is thrown.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testGetContentSummaryNonSuperUser() throws Exception {
+ final Path foo = new Path("/fooNoneSuper");
+ final Path bar = new Path(foo, "barNoneSuper");
+ final Path baz = new Path(bar, "bazNoneSuper");
+ // run as some random non-superuser, non-owner user.
+ final UserGroupInformation userUgi =
+ UserGroupInformation.createUserForTesting(
+ "randomUser", new String[]{"randomGroup"});
+ dfs.mkdirs(bar);
+ DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+ // by default, permission is rwxr-xr-x; as long as READ and EXECUTE are set,
+ // the content summary should be accessible
+ FileStatus fileStatus;
+ fileStatus = dfs.getFileStatus(foo);
+ assertEquals((short)755, fileStatus.getPermission().toOctal());
+ fileStatus = dfs.getFileStatus(bar);
+ assertEquals((short)755, fileStatus.getPermission().toOctal());
+ // the file has no EXECUTE bit; it is rw-r--r-- by default
+ fileStatus = dfs.getFileStatus(baz);
+ assertEquals((short)644, fileStatus.getPermission().toOctal());
+
+ // by default, can get content summary
+ ContentSummary summary =
+ userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ verifySummary(summary, 2, 1, 10);
+
+ // set empty access on root dir, should disallow content summary
+ dfs.setPermission(foo, new FsPermission((short)0));
+ try {
+ userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ fail("Should've fail due to access control exception.");
+ } catch (AccessControlException e) {
+ assertTrue(e.getMessage().contains("Permission denied"));
+ }
+
+ // restore foo's permission to allow READ_EXECUTE
+ dfs.setPermission(foo,
+ new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+ // set empty access on subdir, should disallow content summary from root dir
+ dfs.setPermission(bar, new FsPermission((short)0));
+
+ try {
+ userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ fail("Should've fail due to access control exception.");
+ } catch (AccessControlException e) {
+ assertTrue(e.getMessage().contains("Permission denied"));
+ }
+
+ // restore the permission of the subdir to READ_EXECUTE, enabling
+ // getContentSummary again from the root
+ dfs.setPermission(bar,
+ new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+ summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ verifySummary(summary, 2, 1, 10);
+
+ // permissions of files under the directory do not affect
+ // getContentSummary
+ dfs.setPermission(baz, new FsPermission((short)0));
+ summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ verifySummary(summary, 2, 1, 10);
+ }
+
+ private void verifySummary(ContentSummary summary, int dirCount,
+ int fileCount, int length) {
+ assertEquals(dirCount, summary.getDirectoryCount());
+ assertEquals(fileCount, summary.getFileCount());
+ assertEquals(length, summary.getLength());
+ }
+
+}
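For client code, the visible effect of this patch is that FileSystem#getContentSummary can now fail part-way down the tree with an AccessControlException (an IOException subclass). A minimal usage sketch, assuming an already-configured FileSystem and an illustrative path:

import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

class SummaryClient {
    static void printSummary(FileSystem fs, Path p) throws IOException {
        try {
            ContentSummary cs = fs.getContentSummary(p);
            System.out.println(p + ": " + cs.getFileCount() + " files, "
                + cs.getLength() + " bytes");
        } catch (AccessControlException ace) {
            // thrown when a directory under p lacks READ_EXECUTE for the caller
            System.err.println("Cannot summarize " + p + ": " + ace.getMessage());
        }
    }
}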
[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240
Posted by xy...@apache.org.
Merge branch 'trunk' into HDFS-7240
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3a7f3b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3a7f3b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3a7f3b2
Branch: refs/heads/HDFS-7240
Commit: b3a7f3b2dfffdd83abcfbe630e31acfcf68f5521
Parents: 84e11c7 c21c260
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jul 19 14:52:35 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 19 14:52:35 2017 -0700
----------------------------------------------------------------------
hadoop-client-modules/hadoop-client/pom.xml | 4 -
.../util/TestCertificateUtil.java | 6 +-
hadoop-common-project/hadoop-common/pom.xml | 4 -
.../org/apache/hadoop/conf/Configuration.java | 27 +-
.../apache/hadoop/conf/ReconfigurableBase.java | 7 +-
.../hadoop/conf/ReconfigurationServlet.java | 8 +-
.../hadoop/crypto/JceAesCtrCryptoCodec.java | 8 +-
.../hadoop/crypto/OpensslAesCtrCryptoCodec.java | 8 +-
.../org/apache/hadoop/crypto/OpensslCipher.java | 8 +-
.../crypto/key/kms/KMSClientProvider.java | 39 +-
.../key/kms/LoadBalancingKMSClientProvider.java | 90 +-
.../crypto/random/OpensslSecureRandom.java | 8 +-
.../hadoop/crypto/random/OsSecureRandom.java | 9 +-
.../apache/hadoop/fs/AbstractFileSystem.java | 6 +-
.../java/org/apache/hadoop/fs/ChecksumFs.java | 8 +-
.../fs/CommonConfigurationKeysPublic.java | 29 +
.../hadoop/fs/DelegationTokenRenewer.java | 10 +-
.../org/apache/hadoop/fs/FSInputChecker.java | 9 +-
.../java/org/apache/hadoop/fs/FileContext.java | 10 +-
.../java/org/apache/hadoop/fs/FileUtil.java | 10 +-
.../main/java/org/apache/hadoop/fs/FsShell.java | 6 +-
.../apache/hadoop/fs/FsShellPermissions.java | 4 +-
.../main/java/org/apache/hadoop/fs/Globber.java | 7 +-
.../org/apache/hadoop/fs/HarFileSystem.java | 11 +-
.../org/apache/hadoop/fs/LocalDirAllocator.java | 9 +-
.../main/java/org/apache/hadoop/fs/Trash.java | 7 +-
.../apache/hadoop/fs/TrashPolicyDefault.java | 8 +-
.../org/apache/hadoop/fs/ftp/FTPFileSystem.java | 8 +-
.../hadoop/fs/permission/FsPermission.java | 6 +-
.../hadoop/fs/sftp/SFTPConnectionPool.java | 7 +-
.../apache/hadoop/fs/sftp/SFTPFileSystem.java | 7 +-
.../org/apache/hadoop/fs/shell/Command.java | 6 +-
.../apache/hadoop/fs/shell/CopyCommands.java | 112 ++-
.../apache/hadoop/fs/shell/MoveCommands.java | 4 +-
.../apache/hadoop/ha/ActiveStandbyElector.java | 15 +-
.../apache/hadoop/ha/FailoverController.java | 10 +-
.../main/java/org/apache/hadoop/ha/HAAdmin.java | 8 +-
.../org/apache/hadoop/ha/HealthMonitor.java | 8 +-
.../java/org/apache/hadoop/ha/NodeFencer.java | 6 +-
.../org/apache/hadoop/ha/PowerShellFencer.java | 7 +-
.../apache/hadoop/ha/ShellCommandFencer.java | 7 +-
.../org/apache/hadoop/ha/SshFenceByTcpPort.java | 18 +-
.../java/org/apache/hadoop/ha/StreamPumper.java | 8 +-
.../apache/hadoop/ha/ZKFailoverController.java | 20 +-
...HAServiceProtocolServerSideTranslatorPB.java | 6 +-
.../org/apache/hadoop/http/HttpServer2.java | 6 +-
.../hadoop/http/lib/StaticUserWebFilter.java | 7 +-
.../java/org/apache/hadoop/io/BloomMapFile.java | 6 +-
.../apache/hadoop/io/FastByteComparisons.java | 7 +-
.../main/java/org/apache/hadoop/io/IOUtils.java | 5 +-
.../main/java/org/apache/hadoop/io/MapFile.java | 8 +-
.../org/apache/hadoop/io/ReadaheadPool.java | 6 +-
.../java/org/apache/hadoop/io/SequenceFile.java | 7 +-
.../main/java/org/apache/hadoop/io/UTF8.java | 5 +-
.../apache/hadoop/io/compress/CodecPool.java | 6 +-
.../io/compress/CompressionCodecFactory.java | 8 +-
.../apache/hadoop/io/compress/DefaultCodec.java | 6 +-
.../io/compress/bzip2/Bzip2Compressor.java | 8 +-
.../io/compress/bzip2/Bzip2Decompressor.java | 8 +-
.../hadoop/io/compress/bzip2/Bzip2Factory.java | 6 +-
.../hadoop/io/compress/lz4/Lz4Compressor.java | 8 +-
.../hadoop/io/compress/lz4/Lz4Decompressor.java | 8 +-
.../io/compress/snappy/SnappyCompressor.java | 8 +-
.../io/compress/snappy/SnappyDecompressor.java | 8 +-
.../io/compress/zlib/BuiltInZlibDeflater.java | 8 +-
.../hadoop/io/compress/zlib/ZlibCompressor.java | 8 +-
.../hadoop/io/compress/zlib/ZlibFactory.java | 8 +-
.../apache/hadoop/io/erasurecode/CodecUtil.java | 6 +-
.../io/erasurecode/ErasureCodeNative.java | 8 +-
.../org/apache/hadoop/io/file/tfile/BCFile.java | 6 +-
.../hadoop/io/file/tfile/Compression.java | 6 +-
.../org/apache/hadoop/io/file/tfile/TFile.java | 8 +-
.../hadoop/io/file/tfile/TFileDumper.java | 8 +-
.../org/apache/hadoop/io/nativeio/NativeIO.java | 16 +-
.../nativeio/SharedFileDescriptorFactory.java | 7 +-
.../apache/hadoop/io/retry/RetryPolicies.java | 6 +-
.../org/apache/hadoop/io/retry/RetryUtils.java | 6 +-
.../io/serializer/SerializationFactory.java | 8 +-
.../org/apache/hadoop/ipc/CallQueueManager.java | 7 +-
.../main/java/org/apache/hadoop/ipc/Client.java | 6 +-
.../org/apache/hadoop/ipc/FairCallQueue.java | 6 +-
.../apache/hadoop/ipc/ProtobufRpcEngine.java | 7 +-
.../main/java/org/apache/hadoop/ipc/RPC.java | 6 +-
.../org/apache/hadoop/ipc/RefreshRegistry.java | 7 +-
.../java/org/apache/hadoop/ipc/RetryCache.java | 6 +-
.../main/java/org/apache/hadoop/ipc/Server.java | 16 +-
.../ipc/WeightedRoundRobinMultiplexer.java | 8 +-
.../apache/hadoop/ipc/WritableRpcEngine.java | 6 +-
.../hadoop/ipc/metrics/RetryCacheMetrics.java | 6 +-
.../hadoop/ipc/metrics/RpcDetailedMetrics.java | 8 +-
.../apache/hadoop/ipc/metrics/RpcMetrics.java | 6 +-
.../org/apache/hadoop/jmx/JMXJsonServlet.java | 7 +-
.../hadoop/metrics2/impl/MBeanInfoBuilder.java | 2 +-
.../hadoop/metrics2/impl/MetricsConfig.java | 8 +-
.../metrics2/impl/MetricsSinkAdapter.java | 9 +-
.../metrics2/impl/MetricsSourceAdapter.java | 7 +-
.../hadoop/metrics2/impl/MetricsSystemImpl.java | 6 +-
.../hadoop/metrics2/lib/MethodMetric.java | 7 +-
.../metrics2/lib/MetricsSourceBuilder.java | 7 +-
.../metrics2/lib/MutableMetricsFactory.java | 7 +-
.../hadoop/metrics2/lib/MutableRates.java | 7 +-
.../lib/MutableRatesWithAggregation.java | 7 +-
.../hadoop/metrics2/sink/GraphiteSink.java | 7 +-
.../sink/ganglia/AbstractGangliaSink.java | 10 +-
.../metrics2/sink/ganglia/GangliaSink30.java | 6 +-
.../metrics2/sink/ganglia/GangliaSink31.java | 7 +-
.../org/apache/hadoop/metrics2/util/MBeans.java | 6 +-
.../hadoop/metrics2/util/MetricsCache.java | 6 +-
.../main/java/org/apache/hadoop/net/DNS.java | 6 +-
.../java/org/apache/hadoop/net/NetUtils.java | 6 +-
.../apache/hadoop/net/ScriptBasedMapping.java | 8 +-
.../apache/hadoop/net/SocketIOWithTimeout.java | 6 +-
.../org/apache/hadoop/net/TableMapping.java | 6 +-
.../apache/hadoop/net/unix/DomainSocket.java | 10 +-
.../hadoop/net/unix/DomainSocketWatcher.java | 12 +-
.../AuthenticationWithProxyUserFilter.java | 8 +-
.../hadoop/security/CompositeGroupsMapping.java | 7 +-
.../org/apache/hadoop/security/Credentials.java | 10 +-
.../java/org/apache/hadoop/security/Groups.java | 7 +-
.../HttpCrossOriginFilterInitializer.java | 8 +-
.../security/JniBasedUnixGroupsMapping.java | 8 +-
.../JniBasedUnixGroupsMappingWithFallback.java | 8 +-
.../JniBasedUnixGroupsNetgroupMapping.java | 6 +-
...edUnixGroupsNetgroupMappingWithFallback.java | 8 +-
.../hadoop/security/LdapGroupsMapping.java | 7 +-
.../apache/hadoop/security/ProviderUtils.java | 7 +-
.../apache/hadoop/security/SaslInputStream.java | 7 +-
.../apache/hadoop/security/SaslRpcClient.java | 7 +-
.../apache/hadoop/security/SaslRpcServer.java | 6 +-
.../apache/hadoop/security/SecurityUtil.java | 9 +-
.../hadoop/security/ShellBasedIdMapping.java | 8 +-
.../ShellBasedUnixGroupsNetgroupMapping.java | 8 +-
.../hadoop/security/WhitelistBasedResolver.java | 7 +-
.../alias/AbstractJavaKeyStoreProvider.java | 6 +-
.../alias/CredentialProviderFactory.java | 10 +
.../authorize/ServiceAuthorizationManager.java | 9 +-
.../hadoop/security/http/CrossOriginFilter.java | 7 +-
.../security/ssl/FileBasedKeyStoresFactory.java | 8 +-
.../security/ssl/ReloadingX509TrustManager.java | 7 +-
.../hadoop/security/token/DtFileOperations.java | 7 +-
.../hadoop/security/token/DtUtilShell.java | 6 +-
.../org/apache/hadoop/security/token/Token.java | 6 +-
.../AbstractDelegationTokenSecretManager.java | 8 +-
.../apache/hadoop/service/AbstractService.java | 9 +-
.../apache/hadoop/service/CompositeService.java | 7 +-
.../service/LoggingStateChangeListener.java | 11 +-
.../hadoop/service/ServiceOperations.java | 5 +-
.../tracing/TracerConfigurationManager.java | 8 +-
.../hadoop/util/ApplicationClassLoader.java | 10 +-
.../apache/hadoop/util/AsyncDiskService.java | 7 +-
.../apache/hadoop/util/CombinedIPWhiteList.java | 7 +-
.../org/apache/hadoop/util/FileBasedIPList.java | 11 +-
.../main/java/org/apache/hadoop/util/GSet.java | 6 +-
.../hadoop/util/GenericOptionsParser.java | 7 +-
.../org/apache/hadoop/util/HostsFileReader.java | 9 +-
.../apache/hadoop/util/IntrusiveCollection.java | 7 +-
.../org/apache/hadoop/util/JvmPauseMonitor.java | 6 +-
.../org/apache/hadoop/util/MachineList.java | 6 +-
.../apache/hadoop/util/NativeCodeLoader.java | 8 +-
.../hadoop/util/NodeHealthScriptRunner.java | 7 +-
.../java/org/apache/hadoop/util/Progress.java | 6 +-
.../apache/hadoop/util/ShutdownHookManager.java | 7 +-
.../hadoop/util/ShutdownThreadsHelper.java | 7 +-
.../org/apache/hadoop/util/SysInfoLinux.java | 8 +-
.../org/apache/hadoop/util/SysInfoWindows.java | 7 +-
.../java/org/apache/hadoop/util/ThreadUtil.java | 7 +-
.../org/apache/hadoop/util/VersionInfo.java | 8 +-
.../hadoop/util/concurrent/AsyncGetFuture.java | 7 +-
.../hadoop/util/concurrent/ExecutorHelper.java | 8 +-
.../HadoopScheduledThreadPoolExecutor.java | 8 +-
.../concurrent/HadoopThreadPoolExecutor.java | 8 +-
.../src/main/resources/core-default.xml | 37 +-
.../3.0.0-alpha4/CHANGES.3.0.0-alpha4.md | 880 +++++++++++++++++++
.../3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.md | 492 +++++++++++
.../conf/TestCommonConfigurationFields.java | 1 +
.../apache/hadoop/conf/TestDeprecatedKeys.java | 2 +-
.../hadoop/crypto/CryptoStreamsTestBase.java | 6 +-
.../apache/hadoop/crypto/TestCryptoCodec.java | 7 +-
.../crypto/key/TestKeyProviderFactory.java | 11 +-
.../kms/TestLoadBalancingKMSClientProvider.java | 378 +++++++-
.../apache/hadoop/fs/FCStatisticsBaseTest.java | 7 +-
.../fs/FileContextCreateMkdirBaseTest.java | 2 +-
.../fs/FileContextMainOperationsBaseTest.java | 66 +-
.../hadoop/fs/FileContextPermissionBase.java | 5 +-
.../apache/hadoop/fs/FileContextUtilBase.java | 5 +-
.../org/apache/hadoop/fs/TestFileContext.java | 7 +-
.../org/apache/hadoop/fs/TestFileStatus.java | 8 +-
.../java/org/apache/hadoop/fs/TestFileUtil.java | 6 +-
.../org/apache/hadoop/fs/TestFsShellCopy.java | 6 +-
.../apache/hadoop/fs/TestFsShellReturnCode.java | 8 +-
.../org/apache/hadoop/fs/TestFsShellTouch.java | 6 +-
.../org/apache/hadoop/fs/TestHarFileSystem.java | 7 +-
.../java/org/apache/hadoop/fs/TestHardLink.java | 2 +-
.../org/apache/hadoop/fs/TestListFiles.java | 4 +-
.../fs/TestLocalFileSystemPermission.java | 2 +-
.../fs/contract/AbstractBondedFSContract.java | 8 +-
.../hadoop/fs/contract/ContractTestUtils.java | 8 +
.../hadoop/fs/loadGenerator/LoadGenerator.java | 10 +-
.../hadoop/fs/shell/TestCopyFromLocal.java | 173 ++++
.../hadoop/fs/shell/TestCopyPreserveFlag.java | 19 +
.../hadoop/ha/ActiveStandbyElectorTestUtil.java | 6 +-
.../org/apache/hadoop/ha/DummyHAService.java | 7 +-
.../org/apache/hadoop/ha/MiniZKFCCluster.java | 7 +-
.../ha/TestActiveStandbyElectorRealZK.java | 7 +-
.../java/org/apache/hadoop/ha/TestHAAdmin.java | 6 +-
.../org/apache/hadoop/ha/TestHealthMonitor.java | 6 +-
.../hadoop/ha/TestShellCommandFencer.java | 55 +-
.../apache/hadoop/ha/TestSshFenceByTcpPort.java | 6 +-
.../hadoop/ha/TestZKFailoverController.java | 5 +-
.../apache/hadoop/http/TestGlobalFilter.java | 6 +-
.../org/apache/hadoop/http/TestHttpServer.java | 6 +-
.../apache/hadoop/http/TestHttpServerLogs.java | 6 +-
.../hadoop/http/TestHttpServerWebapps.java | 9 +-
.../hadoop/http/TestHttpServerWithSpengo.java | 7 +-
.../org/apache/hadoop/http/TestPathFilter.java | 6 +-
.../apache/hadoop/http/TestSSLHttpServer.java | 7 +-
.../apache/hadoop/http/TestServletFilter.java | 6 +-
.../hadoop/http/resource/JerseyResource.java | 6 +-
.../org/apache/hadoop/io/TestArrayFile.java | 7 +-
.../hadoop/io/TestDefaultStringifier.java | 8 +-
.../org/apache/hadoop/io/TestSequenceFile.java | 7 +-
.../java/org/apache/hadoop/io/TestSetFile.java | 7 +-
.../org/apache/hadoop/io/TestWritableUtils.java | 7 +-
.../apache/hadoop/io/compress/TestCodec.java | 8 +-
.../io/compress/TestCompressionStreamReuse.java | 9 +-
.../apache/hadoop/io/nativeio/TestNativeIO.java | 10 +-
.../TestSharedFileDescriptorFactory.java | 7 +-
.../io/serializer/TestSerializationFactory.java | 6 +-
.../org/apache/hadoop/ipc/MiniRPCBenchmark.java | 13 +-
.../org/apache/hadoop/ipc/TestAsyncIPC.java | 10 +-
.../java/org/apache/hadoop/ipc/TestIPC.java | 22 +-
.../hadoop/ipc/TestIPCServerResponder.java | 10 +-
.../apache/hadoop/ipc/TestMiniRPCBenchmark.java | 2 +-
.../ipc/TestProtoBufRpcServerHandoff.java | 12 +-
.../java/org/apache/hadoop/ipc/TestRPC.java | 8 +-
.../apache/hadoop/ipc/TestRPCCompatibility.java | 8 +-
.../hadoop/ipc/TestRPCServerShutdown.java | 7 +-
.../apache/hadoop/ipc/TestRpcServerHandoff.java | 8 +-
.../java/org/apache/hadoop/ipc/TestSaslRPC.java | 23 +-
.../java/org/apache/hadoop/ipc/TestServer.java | 4 +-
.../ipc/TestWeightedRoundRobinMultiplexer.java | 7 +-
.../metrics2/impl/TestGangliaMetrics.java | 7 +-
.../hadoop/metrics2/impl/TestMetricsConfig.java | 7 +-
.../metrics2/impl/TestMetricsSystemImpl.java | 13 +-
.../hadoop/metrics2/impl/TestSinkQueue.java | 10 +-
.../hadoop/metrics2/lib/TestMutableMetrics.java | 7 +-
.../hadoop/metrics2/util/TestMetricsCache.java | 8 +-
.../org/apache/hadoop/net/ServerSocketUtil.java | 9 +-
.../java/org/apache/hadoop/net/TestDNS.java | 6 +-
.../org/apache/hadoop/net/TestNetUtils.java | 6 +-
.../hadoop/net/TestSocketIOWithTimeout.java | 8 +-
.../apache/hadoop/net/TestStaticMapping.java | 7 +-
.../hadoop/net/unix/TestDomainSocket.java | 2 +-
.../net/unix/TestDomainSocketWatcher.java | 15 +-
.../security/TestCompositeGroupMapping.java | 7 +-
.../hadoop/security/TestDoAsEffectiveUser.java | 8 +-
.../hadoop/security/TestGroupFallback.java | 19 +-
.../hadoop/security/TestGroupsCaching.java | 8 +-
.../TestShellBasedUnixGroupsMapping.java | 8 +-
.../hadoop/security/TestUGIWithMiniKdc.java | 2 +-
.../security/TestUserGroupInformation.java | 2 +-
.../alias/TestCredentialProviderFactory.java | 17 +-
.../authorize/TestAccessControlList.java | 16 +-
.../security/authorize/TestProxyUsers.java | 8 +-
.../hadoop/security/ssl/TestSSLFactory.java | 2 +-
.../token/delegation/TestDelegationToken.java | 7 +-
.../delegation/web/TestWebDelegationToken.java | 2 +-
.../hadoop/service/TestCompositeService.java | 7 +-
.../hadoop/service/TestServiceLifecycle.java | 7 +-
.../apache/hadoop/test/GenericTestUtils.java | 71 +-
.../org/apache/hadoop/test/MetricsAsserts.java | 6 +-
.../hadoop/test/MultithreadedTestUtil.java | 8 +-
.../hadoop/test/TestGenericTestUtils.java | 15 +-
.../org/apache/hadoop/test/TestJUnitSetup.java | 7 +-
.../hadoop/util/Crc32PerformanceTest.java | 11 +-
.../hadoop/util/TestAsyncDiskService.java | 7 +-
.../org/apache/hadoop/util/TestClasspath.java | 9 +-
.../org/apache/hadoop/util/TestFindClass.java | 7 +-
.../hadoop/util/TestIdentityHashStore.java | 7 +-
.../apache/hadoop/util/TestLightWeightGSet.java | 7 +-
.../util/TestLightWeightResizableGSet.java | 7 +-
.../hadoop/util/TestNativeCodeLoader.java | 6 +-
.../hadoop/util/TestReadWriteDiskValidator.java | 5 +-
.../apache/hadoop/util/TestSignalLogger.java | 11 +-
.../org/apache/hadoop/util/TestWinUtils.java | 6 +-
.../src/test/resources/testConf.xml | 44 +-
hadoop-common-project/hadoop-nfs/pom.xml | 6 +
.../org/apache/hadoop/mount/MountdBase.java | 12 +-
.../java/org/apache/hadoop/nfs/NfsExports.java | 6 +-
.../org/apache/hadoop/nfs/nfs3/FileHandle.java | 6 +-
.../org/apache/hadoop/nfs/nfs3/Nfs3Base.java | 10 +-
.../hadoop/oncrpc/RegistrationClient.java | 7 +-
.../java/org/apache/hadoop/oncrpc/RpcCall.java | 10 +-
.../org/apache/hadoop/oncrpc/RpcProgram.java | 6 +-
.../java/org/apache/hadoop/oncrpc/RpcUtil.java | 11 +-
.../hadoop/oncrpc/SimpleTcpClientHandler.java | 7 +-
.../apache/hadoop/oncrpc/SimpleTcpServer.java | 7 +-
.../apache/hadoop/oncrpc/SimpleUdpServer.java | 7 +-
.../hadoop/oncrpc/security/Credentials.java | 6 +-
.../hadoop/oncrpc/security/SecurityHandler.java | 7 +-
.../java/org/apache/hadoop/portmap/Portmap.java | 8 +-
.../hadoop/portmap/RpcProgramPortmap.java | 7 +-
.../apache/hadoop/oncrpc/TestFrameDecoder.java | 6 +-
.../hadoop/fs/http/client/HttpFSFileSystem.java | 42 +
.../hadoop/fs/http/server/FSOperations.java | 52 +-
.../http/server/HttpFSAuthenticationFilter.java | 6 +-
.../http/server/HttpFSParametersProvider.java | 3 +-
.../hadoop/fs/http/server/HttpFSServer.java | 21 +-
.../fs/http/server/HttpFSServerWebServer.java | 15 +-
.../libexec/shellprofile.d/hadoop-httpfs.sh | 17 -
.../src/main/resources/httpfs-default.xml | 8 +-
.../src/site/markdown/ServerSetup.md.vm | 11 +-
.../fs/http/client/BaseTestHttpFSWith.java | 115 ++-
...KerberosAuthenticationHandlerForTesting.java | 1 -
.../hadoop/fs/http/server/TestHttpFSServer.java | 264 +++---
.../http/server/TestHttpFSServerWebServer.java | 2 +-
.../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha4.xml | 322 +++++++
.../server/blockmanagement/BlockCollection.java | 24 +-
.../hdfs/server/datanode/BPOfferService.java | 47 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 8 +
.../hdfs/server/datanode/DataNodeMXBean.java | 7 +
.../datanode/fsdataset/impl/FsDatasetImpl.java | 2 +
.../ContentSummaryComputationContext.java | 23 +-
.../namenode/DirectoryWithQuotaFeature.java | 4 +-
.../server/namenode/FSDirStatAndListingOp.java | 9 +-
.../server/namenode/FSPermissionChecker.java | 32 +
.../hadoop/hdfs/server/namenode/INode.java | 9 +-
.../hdfs/server/namenode/INodeDirectory.java | 9 +-
.../hdfs/server/namenode/INodeReference.java | 3 +-
.../hadoop/hdfs/server/namenode/NameNode.java | 5 +-
.../hdfs/server/namenode/SecondaryNameNode.java | 10 +
.../snapshot/DirectorySnapshottableFeature.java | 3 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 3 +-
.../hdfs/server/namenode/snapshot/Snapshot.java | 4 +-
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 57 +-
.../hadoop-hdfs/src/main/webapps/datanode/dn.js | 2 +-
.../fs/TestHDFSFileContextMainOperations.java | 46 +-
.../hadoop/hdfs/TestEncryptionZonesWithKMS.java | 19 +-
.../blockmanagement/TestBlockManager.java | 4 +-
.../server/datanode/TestBPOfferService.java | 29 +
.../server/datanode/TestDataNodeMXBean.java | 4 +
.../TestGetContentSummaryWithPermission.java | 201 +++++
.../namenode/TestNameNodeHttpServerXFrame.java | 22 +
.../namenode/TestNameNodeReconfigure.java | 11 +
.../hadoop/mapreduce/v2/util/MRWebAppUtil.java | 9 +-
.../java/org/apache/hadoop/mapreduce/Job.java | 13 +-
.../hadoop/mapreduce/lib/db/DBOutputFormat.java | 15 +-
.../webapp/TestMapReduceTrackingUriPlugin.java | 26 +-
.../mapreduce/TestMapperReducerCleanup.java | 4 +
.../mapreduce/lib/db/TestDBOutputFormat.java | 45 +
hadoop-project/pom.xml | 6 +-
.../fs/azure/AzureNativeFileSystemStore.java | 78 +-
.../hadoop/fs/azure/BlockBlobInputStream.java | 396 +++++++++
.../hadoop/fs/azure/CachingAuthorizer.java | 232 +++++
.../fs/azure/LocalSASKeyGeneratorImpl.java | 28 +-
.../hadoop/fs/azure/NativeAzureFileSystem.java | 78 +-
.../fs/azure/NativeAzureFileSystemHelper.java | 28 +
.../hadoop/fs/azure/NativeFileSystemStore.java | 6 +-
.../fs/azure/RemoteSASKeyGeneratorImpl.java | 54 +-
.../fs/azure/RemoteWasbAuthorizerImpl.java | 46 +-
.../hadoop/fs/azure/SASKeyGeneratorImpl.java | 4 +-
.../fs/azure/SecureStorageInterfaceImpl.java | 5 +
.../fs/azure/SecureWasbRemoteCallHelper.java | 86 +-
.../hadoop/fs/azure/StorageInterface.java | 11 +-
.../hadoop/fs/azure/StorageInterfaceImpl.java | 5 +
.../hadoop/fs/azure/WasbRemoteCallHelper.java | 61 +-
.../hadoop/fs/azure/security/Constants.java | 19 +-
.../RemoteWasbDelegationTokenManager.java | 27 +-
.../hadoop/fs/azure/security/SpnegoToken.java | 49 ++
.../hadoop-azure/src/site/markdown/index.md | 50 ++
.../hadoop/fs/azure/AbstractWasbTestBase.java | 5 +
.../fs/azure/AzureBlobStorageTestAccount.java | 40 +-
.../hadoop/fs/azure/MockStorageInterface.java | 36 +-
.../hadoop/fs/azure/MockWasbAuthorizerImpl.java | 22 +-
.../azure/TestAzureConcurrentOutOfBandIo.java | 2 +-
.../fs/azure/TestBlockBlobInputStream.java | 756 ++++++++++++++++
.../TestNativeAzureFSAuthorizationCaching.java | 60 ++
.../TestNativeAzureFileSystemAuthorization.java | 231 +++--
...veAzureFileSystemAuthorizationWithOwner.java | 2 +-
.../fs/azure/TestWasbRemoteCallHelper.java | 64 +-
.../src/test/resources/azure-test.xml | 3 +-
.../ams/ApplicationMasterServiceContext.java | 29 +
.../ams/ApplicationMasterServiceProcessor.java | 81 ++
.../yarn/ams/ApplicationMasterServiceUtils.java | 89 ++
.../apache/hadoop/yarn/ams/package-info.java | 24 +
.../hadoop/yarn/conf/YarnConfiguration.java | 11 +-
.../yarn/client/api/impl/TimelineWriter.java | 8 +-
.../resource/DominantResourceCalculator.java | 22 +-
.../src/main/resources/yarn-default.xml | 20 +
.../timeline/RollingLevelDBTimelineStore.java | 50 +-
.../hadoop-yarn-server-common/pom.xml | 6 -
.../hadoop/yarn/server/webapp/dao/AppInfo.java | 14 +
.../nodemanager/LinuxContainerExecutor.java | 19 +-
.../PrivilegedOperationException.java | 10 +-
.../runtime/ContainerExecutionException.java | 10 +-
.../scheduler/ContainerScheduler.java | 135 ++-
.../recovery/NMLeveldbStateStoreService.java | 6 +-
.../TestLinuxContainerExecutorWithMocks.java | 89 ++
.../TestContainerManagerRecovery.java | 2 +-
.../TestContainerSchedulerQueuing.java | 85 ++
.../resourcemanager/AMSProcessingChain.java | 102 +++
.../ApplicationMasterService.java | 446 +---------
.../resourcemanager/DefaultAMSProcessor.java | 456 ++++++++++
...pportunisticContainerAllocatorAMService.java | 184 ++--
.../yarn/server/resourcemanager/RMContext.java | 3 +-
.../capacity/FifoCandidatesSelector.java | 6 +-
.../ProportionalCapacityPreemptionPolicy.java | 7 +-
.../placement/PlacementFactory.java | 45 +
.../resourcemanager/recovery/RMStateStore.java | 5 +-
.../resource/ResourceWeights.java | 4 +-
.../resourcemanager/rmapp/RMAppEvent.java | 24 -
.../resourcemanager/rmapp/RMAppEventType.java | 1 +
.../server/resourcemanager/rmapp/RMAppImpl.java | 8 +-
.../rmapp/attempt/RMAppAttemptImpl.java | 20 +-
.../scheduler/SchedulerUtils.java | 18 -
.../scheduler/activities/ActivitiesLogger.java | 33 +-
.../scheduler/capacity/CapacityScheduler.java | 42 +-
.../CapacitySchedulerConfiguration.java | 27 +
.../scheduler/capacity/LeafQueue.java | 83 +-
.../scheduler/fair/FSContext.java | 2 +-
.../scheduler/fair/FairScheduler.java | 11 +-
.../fair/policies/FairSharePolicy.java | 17 +-
.../webapp/FairSchedulerAppsBlock.java | 6 +
.../resourcemanager/webapp/NodeIDsInfo.java | 5 +-
.../resourcemanager/webapp/RMAppsBlock.java | 8 +
.../resourcemanager/webapp/dao/AppInfo.java | 13 +
.../webapp/dao/LabelsToNodesInfo.java | 6 +-
.../TestApplicationMasterService.java | 163 +++-
...pportunisticContainerAllocatorAMService.java | 8 +
.../server/resourcemanager/TestRMRestart.java | 4 +-
.../recovery/TestZKRMStateStore.java | 14 +-
.../rmapp/TestRMAppTransitions.java | 17 +
.../attempt/TestRMAppAttemptTransitions.java | 33 +-
.../CapacitySchedulerPreemptionTestBase.java | 7 +-
.../capacity/TestCapacityScheduler.java | 186 +++-
.../TestCapacitySchedulerAsyncScheduling.java | 149 ++++
...TestCapacitySchedulerSurgicalPreemption.java | 99 ++-
.../scheduler/capacity/TestLeafQueue.java | 10 +-
.../scheduler/fair/TestSchedulingPolicy.java | 19 +-
.../webapp/TestRMWebServicesApps.java | 2 +-
.../server/webproxy/amfilter/AmIpFilter.java | 60 +-
.../server/webproxy/amfilter/TestAmFilter.java | 70 +-
.../src/site/markdown/ResourceManagerHA.md | 2 +-
.../src/site/markdown/TimelineServer.md | 4 +-
444 files changed, 9525 insertions(+), 2377 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3a7f3b2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --cc hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index fe1cc6e,eb8a5c3..2055afa
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@@ -18,24 -18,6 +18,24 @@@
package org.apache.hadoop.fs;
+import com.google.common.base.Preconditions;
+import org.apache.commons.collections.map.CaseInsensitiveMap;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.io.FileUtils;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
+
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3a7f3b2/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3a7f3b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3a7f3b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3a7f3b2/hadoop-project/pom.xml
----------------------------------------------------------------------
[49/50] [abbrv] hadoop git commit: Merge branch 'HDFS-7240' of
https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240
Posted by xy...@apache.org.
Merge branch 'HDFS-7240' of https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84e11c7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84e11c7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84e11c7c
Branch: refs/heads/HDFS-7240
Commit: 84e11c7c01a928e9e101d3ca0a7be0547479ca12
Parents: 9a4246c a715f60
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jul 19 14:47:05 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 19 14:47:05 2017 -0700
----------------------------------------------------------------------
.../hadoop/util/concurrent/HadoopExecutors.java | 41 ++
.../org/apache/hadoop/scm/ScmConfigKeys.java | 23 +
.../server/datanode/ObjectStoreHandler.java | 3 +-
.../checker/StorageLocationChecker.java | 8 +-
.../org/apache/hadoop/ozone/OzoneBucket.java | 117 ++++
.../org/apache/hadoop/ozone/OzoneClient.java | 608 ++++++++++++-------
.../apache/hadoop/ozone/OzoneClientImpl.java | 519 ++++++++++++++++
.../apache/hadoop/ozone/OzoneConfigKeys.java | 9 +
.../apache/hadoop/ozone/OzoneConfiguration.java | 7 +
.../java/org/apache/hadoop/ozone/OzoneKey.java | 120 ++++
.../org/apache/hadoop/ozone/OzoneVolume.java | 107 ++++
.../common/helpers/ContainerUtils.java | 16 +-
.../container/common/helpers/KeyUtils.java | 20 +-
.../common/impl/ContainerManagerImpl.java | 6 +-
.../container/common/impl/KeyManagerImpl.java | 57 +-
.../container/common/interfaces/KeyManager.java | 13 +-
.../container/common/utils/ContainerCache.java | 14 +-
.../hadoop/ozone/io/OzoneInputStream.java | 52 ++
.../hadoop/ozone/io/OzoneOutputStream.java | 62 ++
.../apache/hadoop/ozone/io/package-info.java | 23 +
.../apache/hadoop/ozone/ksm/BucketManager.java | 2 +-
.../org/apache/hadoop/ozone/ksm/KeyManager.java | 1 +
.../hadoop/ozone/ksm/MetadataManager.java | 32 +-
.../hadoop/ozone/ksm/MetadataManagerImpl.java | 181 +++---
.../apache/hadoop/ozone/ksm/VolumeManager.java | 3 +-
.../hadoop/ozone/ksm/VolumeManagerImpl.java | 50 +-
.../ozone/scm/StorageContainerManager.java | 10 +-
.../ozone/scm/block/BlockManagerImpl.java | 67 +-
.../org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 112 ++--
.../ozone/scm/container/ContainerMapping.java | 19 +-
.../ContainerReplicationManager.java | 306 ++++++++++
.../container/replication/InProgressPool.java | 302 +++++++++
.../scm/container/replication/PeriodicPool.java | 119 ++++
.../scm/container/replication/package-info.java | 23 +
.../ozone/scm/exceptions/SCMException.java | 3 +-
.../hadoop/ozone/scm/node/CommandQueue.java | 126 +++-
.../hadoop/ozone/scm/node/NodeManager.java | 10 +-
.../hadoop/ozone/scm/node/NodePoolManager.java | 5 +-
.../hadoop/ozone/scm/node/SCMNodeManager.java | 51 +-
.../ozone/scm/node/SCMNodePoolManager.java | 53 +-
.../hadoop/ozone/web/client/OzoneBucket.java | 22 +-
.../ozone/web/client/OzoneRestClient.java | 17 +-
.../hadoop/ozone/web/client/OzoneVolume.java | 6 +-
.../web/handlers/BucketProcessTemplate.java | 20 +-
.../ozone/web/handlers/KeyProcessTemplate.java | 7 +
.../web/handlers/VolumeProcessTemplate.java | 5 +-
.../web/localstorage/OzoneMetadataManager.java | 133 ++--
.../apache/hadoop/ozone/web/ozShell/Shell.java | 2 +
.../web/ozShell/bucket/ListBucketHandler.java | 2 +
.../web/ozShell/volume/ListVolumeHandler.java | 13 +-
.../web/ozShell/volume/UpdateVolumeHandler.java | 4 +-
.../hadoop/ozone/web/utils/OzoneUtils.java | 24 +
.../org/apache/hadoop/utils/BatchOperation.java | 90 +++
.../org/apache/hadoop/utils/EntryConsumer.java | 38 ++
.../apache/hadoop/utils/LevelDBKeyFilters.java | 65 --
.../org/apache/hadoop/utils/LevelDBStore.java | 182 +++---
.../apache/hadoop/utils/MetadataKeyFilters.java | 65 ++
.../org/apache/hadoop/utils/MetadataStore.java | 152 +++++
.../hadoop/utils/MetadataStoreBuilder.java | 96 +++
.../StorageContainerDatanodeProtocol.proto | 5 +-
.../src/main/resources/ozone-default.xml | 42 ++
.../hadoop-hdfs/src/site/markdown/OzoneRest.md | 14 +-
.../apache/hadoop/ozone/MiniOzoneCluster.java | 12 +
.../apache/hadoop/ozone/RatisTestHelper.java | 80 ++-
.../apache/hadoop/ozone/TestLevelDBStore.java | 165 -----
.../apache/hadoop/ozone/TestMetadataStore.java | 296 +++++++++
.../apache/hadoop/ozone/TestOzoneClient.java | 190 ------
.../hadoop/ozone/TestOzoneClientImpl.java | 214 +++++++
.../ReplicationDatanodeStateManager.java | 92 +++
.../TestUtils/ReplicationNodeManagerMock.java | 315 ++++++++++
.../ReplicationNodePoolManagerMock.java | 132 ++++
.../ozone/container/TestUtils/package-info.java | 18 +
.../ozone/container/common/SCMTestUtils.java | 8 +-
.../ozone/container/common/TestEndPoint.java | 12 +-
.../common/impl/TestContainerPersistence.java | 55 +-
.../TestContainerReplicationManager.java | 260 ++++++++
.../container/replication/package-info.java | 18 +
.../hadoop/ozone/ksm/TestKeySpaceManager.java | 15 +-
.../hadoop/ozone/ozShell/TestOzoneShell.java | 440 +++++++++++++-
.../apache/hadoop/ozone/scm/TestSCMMXBean.java | 5 +-
.../ozone/scm/container/MockNodeManager.java | 11 +
.../hadoop/ozone/web/client/TestBuckets.java | 32 +-
.../ozone/web/client/TestBucketsRatis.java | 75 +++
.../hadoop/ozone/web/client/TestKeys.java | 92 ++-
.../hadoop/ozone/web/client/TestKeysRatis.java | 88 +++
.../hadoop/ozone/web/client/TestVolume.java | 76 ++-
.../ozone/web/client/TestVolumeRatis.java | 99 +++
.../src/test/resources/log4j.properties | 2 +-
88 files changed, 5839 insertions(+), 1267 deletions(-)
----------------------------------------------------------------------
[46/50] [abbrv] hadoop git commit: HDFS-12139. HTTPFS liststatus
returns incorrect pathSuffix for path of file. Contributed by Yongjun Zhang.
Posted by xy...@apache.org.
HDFS-12139. HTTPFS liststatus returns incorrect pathSuffix for path of file. Contributed by Yongjun Zhang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3556e36b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3556e36b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3556e36b
Branch: refs/heads/HDFS-7240
Commit: 3556e36be30211f46ac38899ce11a4d4cac6d635
Parents: 413b23e
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Wed Jul 19 10:54:13 2017 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Wed Jul 19 10:56:50 2017 -0700
----------------------------------------------------------------------
.../hadoop/fs/http/server/FSOperations.java | 15 ++++++-----
.../fs/http/client/BaseTestHttpFSWith.java | 26 +++++++++++++++++++-
2 files changed, 34 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3556e36b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 0fb665a..f1615c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -75,15 +75,17 @@ public class FSOperations {
/**
* @param fileStatuses list of FileStatus objects
+ * @param isFile whether the fileStatuses come from a file path
* @return JSON map suitable for wire transport
*/
@SuppressWarnings({"unchecked"})
- private static Map<String, Object> toJson(FileStatus[] fileStatuses) {
+ private static Map<String, Object> toJson(FileStatus[] fileStatuses,
+ boolean isFile) {
Map<String, Object> json = new LinkedHashMap<>();
Map<String, Object> inner = new LinkedHashMap<>();
JSONArray statuses = new JSONArray();
for (FileStatus f : fileStatuses) {
- statuses.add(toJsonInner(f, false));
+ statuses.add(toJsonInner(f, isFile));
}
inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
@@ -129,13 +131,14 @@ public class FSOperations {
* These two classes are slightly different, due to the impedance
* mismatches between the WebHDFS and FileSystem APIs.
* @param entries
+ * @param isFile whether the entries come from a file path
* @return json
*/
private static Map<String, Object> toJson(FileSystem.DirectoryEntries
- entries) {
+ entries, boolean isFile) {
Map<String, Object> json = new LinkedHashMap<>();
Map<String, Object> inner = new LinkedHashMap<>();
- Map<String, Object> fileStatuses = toJson(entries.getEntries());
+ Map<String, Object> fileStatuses = toJson(entries.getEntries(), isFile);
inner.put(HttpFSFileSystem.PARTIAL_LISTING_JSON, fileStatuses);
inner.put(HttpFSFileSystem.REMAINING_ENTRIES_JSON, entries.hasMore() ? 1
: 0);
@@ -690,7 +693,7 @@ public class FSOperations {
@Override
public Map execute(FileSystem fs) throws IOException {
FileStatus[] fileStatuses = fs.listStatus(path, filter);
- return toJson(fileStatuses);
+ return toJson(fileStatuses, fs.getFileStatus(path).isFile());
}
@Override
@@ -735,7 +738,7 @@ public class FSOperations {
WrappedFileSystem wrappedFS = new WrappedFileSystem(fs);
FileSystem.DirectoryEntries entries =
wrappedFS.listStatusBatch(path, token);
- return toJson(entries);
+ return toJson(entries, wrappedFS.getFileStatus(path).isFile());
}
}
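The isFile flag ultimately drives the pathSuffix field in the JSON that HttpFS returns: under the WebHDFS convention, listing a file directly yields a single entry with an empty pathSuffix, while listing a directory yields the child names. A hedged, abbreviated sketch of the inner serialization this flag feeds (the real logic lives in toJsonInner; the field set is shortened here):

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.fs.FileStatus;

class PathSuffixSketch {
    // Abbreviated sketch, not the full FSOperations.toJsonInner.
    static Map<String, Object> toJsonInner(FileStatus f,
        boolean emptyPathSuffix) {
        Map<String, Object> json = new LinkedHashMap<>();
        // WebHDFS convention: the entry for a directly-listed file carries an
        // empty pathSuffix; a child of a listed directory carries its own name.
        json.put("pathSuffix", emptyPathSuffix ? "" : f.getPath().getName());
        // ... type, length, owner, group, permission, etc. omitted ...
        return json;
    }
}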
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3556e36b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 0fd3f91..e23093e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -364,8 +364,15 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
assertEquals(status2.getLen(), status1.getLen());
FileStatus[] stati = fs.listStatus(path.getParent());
- assertEquals(stati.length, 1);
+ assertEquals(1, stati.length);
assertEquals(stati[0].getPath().getName(), path.getName());
+
+ // The full path should be the path to the file. See HDFS-12139
+ FileStatus[] statl = fs.listStatus(path);
+ Assert.assertEquals(1, statl.length);
+ Assert.assertEquals(status2.getPath(), statl[0].getPath());
+ Assert.assertEquals(statl[0].getPath().getName(), path.getName());
+ Assert.assertEquals(stati[0].getPath(), statl[0].getPath());
}
private static void assertSameListing(FileSystem expected, FileSystem
@@ -411,6 +418,23 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
proxyFs.create(new Path(dir, "file" + i)).close();
assertSameListing(proxyFs, httpFs, dir);
}
+
+ // Test for HDFS-12139
+ Path dir1 = new Path(getProxiedFSTestDir(), "dir1");
+ proxyFs.mkdirs(dir1);
+ Path file1 = new Path(dir1, "file1");
+ proxyFs.create(file1).close();
+
+ RemoteIterator<FileStatus> si = proxyFs.listStatusIterator(dir1);
+ FileStatus statusl = si.next();
+ FileStatus status = proxyFs.getFileStatus(file1);
+ Assert.assertEquals(file1.getName(), statusl.getPath().getName());
+ Assert.assertEquals(status.getPath(), statusl.getPath());
+
+ si = proxyFs.listStatusIterator(file1);
+ statusl = si.next();
+ Assert.assertEquals(file1.getName(), statusl.getPath().getName());
+ Assert.assertEquals(status.getPath(), statusl.getPath());
}
private void testWorkingdirectory() throws Exception {
[04/50] [abbrv] hadoop git commit: YARN-5731. Preemption calculation
is not accurate when reserved containers are present in queue. Contributed by
Wangda Tan.
Posted by xy...@apache.org.
YARN-5731. Preemption calculation is not accurate when reserved containers are present in queue. Contributed by Wangda Tan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf0d0844
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf0d0844
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf0d0844
Branch: refs/heads/HDFS-7240
Commit: cf0d0844d6ae25d537391edb9b65fca05d1848e6
Parents: e15e271
Author: Sunil G <su...@apache.org>
Authored: Thu Jul 13 16:48:29 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Jul 13 16:48:29 2017 +0530
----------------------------------------------------------------------
.../capacity/FifoCandidatesSelector.java | 6 +-
.../ProportionalCapacityPreemptionPolicy.java | 22 ++++-
.../CapacitySchedulerPreemptionTestBase.java | 7 +-
...TestCapacitySchedulerSurgicalPreemption.java | 97 +++++++++++++++++++-
4 files changed, 125 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index f4d7e92..f843db4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -43,12 +43,12 @@ public class FifoCandidatesSelector
LogFactory.getLog(FifoCandidatesSelector.class);
private PreemptableResourceCalculator preemptableAmountCalculator;
- FifoCandidatesSelector(
- CapacitySchedulerPreemptionContext preemptionContext) {
+ FifoCandidatesSelector(CapacitySchedulerPreemptionContext preemptionContext,
+ boolean includeReservedResource) {
super(preemptionContext);
preemptableAmountCalculator = new PreemptableResourceCalculator(
- preemptionContext, false);
+ preemptionContext, includeReservedResource);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 76d6637..719d2eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -232,7 +232,27 @@ public class ProportionalCapacityPreemptionPolicy
}
// initialize candidates preemption selection policies
- candidatesSelectionPolicies.add(new FifoCandidatesSelector(this));
+ // When selecting candidates for reserved containers is enabled, exclude
+ // reserved resources in the FIFO policy (less aggressive). Otherwise,
+ // include reserved resources.
+ //
+ // Why do this? YARN-4390 added preemption based on reserved containers
+ // to reduce unnecessary preemption of large containers; since then we
+ // have not included reserved resources while calculating the
+ // ideal-allocation in FifoCandidatesSelector.
+ //
+ // The YARN-4390 changes significantly reduce the number of containers
+ // preempted when the cluster has heterogeneous container requests (see
+ // the test report:
+ // https://issues.apache.org/jira/secure/attachment/12796197/YARN-4390-test-results.pdf).
+ //
+ // However, in some corner cases, especially on a fragmented cluster,
+ // that can prevent preemption from kicking in at all; see YARN-5731.
+ //
+ // So to solve the problem, we include reserved resources when the
+ // surgical preemption for reserved containers is disabled, which
+ // restores the pre-YARN-4390 behavior.
+ candidatesSelectionPolicies.add(new FifoCandidatesSelector(this,
+ !selectCandidatesForResevedContainers));
// Do we need to specially consider intra queue
boolean isIntraQueuePreemptionEnabled = csConfig.getBoolean(
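As a quick aid to anyone reading the hunk above: the selector's new boolean is simply the negation of the select-candidates-for-reserved-containers switch. Below is a small self-contained sketch of that relationship, reusing the configuration constant exercised by the test later in this patch; the class name and the hard-coded default are illustrative assumptions, not part of the commit.

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

public class ReservedPreemptionFlagSketch {
  public static void main(String[] args) {
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    conf.setBoolean(
        CapacitySchedulerConfiguration.PREEMPTION_SELECT_CANDIDATES_FOR_RESERVED_CONTAINERS,
        false);
    boolean selectForReserved = conf.getBoolean(
        CapacitySchedulerConfiguration.PREEMPTION_SELECT_CANDIDATES_FOR_RESERVED_CONTAINERS,
        false);
    // Selector disabled -> FIFO includes reserved resources again
    // (pre-YARN-4390 behavior); enabled -> FIFO leaves them out.
    boolean includeReservedResource = !selectForReserved;
    System.out.println("includeReservedResource = " + includeReservedResource);
  }
}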
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerPreemptionTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerPreemptionTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerPreemptionTestBase.java
index 943b7d2..55ccb8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerPreemptionTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerPreemptionTestBase.java
@@ -131,9 +131,10 @@ public class CapacitySchedulerPreemptionTestBase {
public void waitNumberOfLiveContainersOnNodeFromApp(FiCaSchedulerNode node,
ApplicationAttemptId appId, int expected) throws InterruptedException {
int waitNum = 0;
+ int total = 0;
while (waitNum < 500) {
- int total = 0;
+ total = 0;
for (RMContainer c : node.getCopiedListOfRunningContainers()) {
if (c.getApplicationAttemptId().equals(appId)) {
total++;
@@ -146,7 +147,9 @@ public class CapacitySchedulerPreemptionTestBase {
waitNum++;
}
- Assert.fail();
+ Assert.fail(
+ "Check #live-container-on-node-from-app, actual=" + total + " expected="
+ + expected);
}
public void checkNumberOfPreemptionCandidateFromApp(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
index 4a37bef..afd2f82 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
@@ -36,11 +36,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateS
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.List;
import java.util.Set;
public class TestCapacitySchedulerSurgicalPreemption
@@ -811,4 +811,99 @@ public class TestCapacitySchedulerSurgicalPreemption
rm1.close();
}
+ @Test(timeout = 60000)
+ public void testPreemptionForFragmentatedCluster() throws Exception {
+ conf.setBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_SELECT_CANDIDATES_FOR_RESERVED_CONTAINERS,
+ false);
+
+ /**
+ * Two queues, a and b, each with 50% capacity.
+ * 5 nodes in the cluster, 30G each, so a node can fit at most one 21G
+ * container, which is what fragments the cluster.
+ *
+ * Submit the first app: AM = 3G, plus 4 * 21G containers.
+ * Submit the second app: AM = 3G, plus 4 * 21G containers.
+ *
+ * One container should get preempted from the first app.
+ */
+ CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(
+ this.conf);
+ conf.setLong(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+ 1024 * 21);
+ conf.setQueues("root", new String[] { "a", "b" });
+ conf.setCapacity("root.a", 50);
+ conf.setUserLimitFactor("root.a", 100);
+ conf.setCapacity("root.b", 50);
+ conf.setUserLimitFactor("root.b", 100);
+ MockRM rm1 = new MockRM(conf);
+ rm1.getRMContext().setNodeLabelManager(mgr);
+ rm1.start();
+
+ List<MockNM> nms = new ArrayList<>();
+ for (int i = 0; i < 5; i++) {
+ nms.add(rm1.registerNode("h" + i + ":1234", 30 * GB));
+ }
+
+ CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+
+ // launch an app to queue a; the AM container should be launched on the first node
+ RMApp app1 = rm1.submitApp(3 * GB, "app", "user", null, "a");
+ MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nms.get(0));
+
+ am1.allocate("*", 21 * GB, 4, new ArrayList<ContainerId>());
+
+ // Do allocation for all nodes
+ for (int i = 0; i < 10; i++) {
+ MockNM mockNM = nms.get(i % nms.size());
+ RMNode rmNode = cs.getRMContext().getRMNodes().get(mockNM.getNodeId());
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode));
+ }
+
+ // App1 should have 5 containers now
+ FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+ am1.getApplicationAttemptId());
+ Assert.assertEquals(5, schedulerApp1.getLiveContainers().size());
+
+ // launch an app to queue b; the AM container should be launched on the third node
+ RMApp app2 = rm1.submitApp(3 * GB, "app", "user", null, "b");
+ MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nms.get(2));
+
+ am2.allocate("*", 21 * GB, 4, new ArrayList<ContainerId>());
+
+ // Do allocation for all nodes
+ for (int i = 0; i < 10; i++) {
+ MockNM mockNM = nms.get(i % nms.size());
+ RMNode rmNode = cs.getRMContext().getRMNodes().get(mockNM.getNodeId());
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode));
+ }
+
+ // App2 should have 2 containers now
+ FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+ am2.getApplicationAttemptId());
+ Assert.assertEquals(2, schedulerApp2.getLiveContainers().size());
+
+ waitNumberOfReservedContainersFromApp(schedulerApp2, 1);
+
+ // Call editSchedule twice, then keep doing allocations; once a container is
+ // preempted from app1, the reserved container of app2 should get allocated
+ SchedulingEditPolicy editPolicy = getSchedulingEditPolicy(rm1);
+ editPolicy.editSchedule();
+ editPolicy.editSchedule();
+
+ int tick = 0;
+ while (schedulerApp2.getLiveContainers().size() != 4 && tick < 10) {
+ // Do allocation for all nodes
+ for (int i = 0; i < 10; i++) {
+ MockNM mockNM = nms.get(i % nms.size());
+ RMNode rmNode = cs.getRMContext().getRMNodes().get(mockNM.getNodeId());
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode));
+ }
+ tick++;
+ Thread.sleep(100);
+ }
+ Assert.assertEquals(3, schedulerApp2.getLiveContainers().size());
+
+ rm1.close();
+ }
+
+
}
[21/50] [abbrv] hadoop git commit: HDFS-12130. Optimizing permission
check for getContentSummary. Contributed by Chen Liang
Posted by xy...@apache.org.
HDFS-12130. Optimizing permission check for getContentSummary. Contributed by Chen Liang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f413ee33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f413ee33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f413ee33
Branch: refs/heads/HDFS-7240
Commit: f413ee33df301659c4ca9024380c2354983dcc84
Parents: a1f12bb
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Fri Jul 14 14:35:51 2017 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Fri Jul 14 14:35:51 2017 -0700
----------------------------------------------------------------------
.../server/blockmanagement/BlockCollection.java | 4 +-
.../ContentSummaryComputationContext.java | 20 ++
.../namenode/DirectoryWithQuotaFeature.java | 4 +-
.../server/namenode/FSDirStatAndListingOp.java | 9 +-
.../server/namenode/FSPermissionChecker.java | 32 +++
.../hadoop/hdfs/server/namenode/INode.java | 9 +-
.../hdfs/server/namenode/INodeDirectory.java | 9 +-
.../hdfs/server/namenode/INodeReference.java | 3 +-
.../snapshot/DirectorySnapshottableFeature.java | 3 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 3 +-
.../hdfs/server/namenode/snapshot/Snapshot.java | 4 +-
.../TestGetContentSummaryWithPermission.java | 201 +++++++++++++++++++
12 files changed, 285 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 2f214be..b880590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.security.AccessControlException;
/**
* This interface is used by the block manager to expose a
@@ -36,7 +37,8 @@ public interface BlockCollection {
/**
* Get content summary.
*/
- public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
+ public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+ throws AccessControlException;
/**
* @return the number of blocks or block groups
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 8d5aa0d..43e6f0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.AccessControlException;
+
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
@@ -46,6 +49,8 @@ public class ContentSummaryComputationContext {
public static final String REPLICATED = "Replicated";
public static final Log LOG = LogFactory.getLog(INode.class);
+
+ private FSPermissionChecker pc;
/**
* Constructor
*
@@ -57,6 +62,12 @@ public class ContentSummaryComputationContext {
*/
public ContentSummaryComputationContext(FSDirectory dir,
FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
+ this(dir, fsn, limitPerRun, sleepMicroSec, null);
+ }
+
+ public ContentSummaryComputationContext(FSDirectory dir,
+ FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
+ FSPermissionChecker pc) {
this.dir = dir;
this.fsn = fsn;
this.limitPerRun = limitPerRun;
@@ -65,6 +76,7 @@ public class ContentSummaryComputationContext {
this.snapshotCounts = new ContentCounts.Builder().build();
this.sleepMilliSec = sleepMicroSec/1000;
this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
+ this.pc = pc;
}
/** Constructor for blocking computation. */
@@ -186,4 +198,12 @@ public class ContentSummaryComputationContext {
}
return "";
}
+
+ void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
+ throws AccessControlException {
+ if (dir != null && dir.isPermissionEnabled()
+ && pc != null && !pc.isSuperUser()) {
+ pc.checkPermission(inode, snapshotId, access);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 31b45ad..0968c65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.security.AccessControlException;
/**
* Quota feature for {@link INodeDirectory}.
@@ -125,7 +126,8 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
}
ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
- final ContentSummaryComputationContext summary) {
+ final ContentSummaryComputationContext summary)
+ throws AccessControlException {
final long original = summary.getCounts().getStoragespace();
long oldYieldCount = summary.getYieldCount();
dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 04efa65..4c92249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -127,10 +127,8 @@ class FSDirStatAndListingOp {
FSDirectory fsd, String src) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
- if (fsd.isPermissionEnabled()) {
- fsd.checkPermission(pc, iip, false, null, null, null,
- FsAction.READ_EXECUTE);
- }
+ // The getContentSummaryInt() call below checks access (if permissions
+ // are enabled) while traversing all subdirectories.
return getContentSummaryInt(fsd, iip);
}
@@ -513,7 +511,8 @@ class FSDirStatAndListingOp {
// processed. 0 means disabled. I.e. blocking for the entire duration.
ContentSummaryComputationContext cscc =
new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
- fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
+ fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
+ fsd.getPermissionChecker());
ContentSummary cs = targetNode.computeAndConvertContentSummary(
iip.getPathSnapshotId(), cscc);
fsd.addYieldCount(cscc.getYieldCount());
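From a client's point of view, the practical effect of the change above is that the access check now happens during the traversal rather than once up front. A minimal usage sketch follows; the path and the default-configured cluster are assumptions for illustration, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class ContentSummarySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try {
      // Succeeds only if every subdirectory grants READ_EXECUTE to the
      // caller (superusers are exempt, and plain files are not checked).
      ContentSummary cs = fs.getContentSummary(new Path("/data"));
      System.out.println("dirs=" + cs.getDirectoryCount()
          + " files=" + cs.getFileCount() + " bytes=" + cs.getLength());
    } catch (AccessControlException ace) {
      // With this patch the exception can surface mid-traversal, from the
      // first subdirectory that denies READ_EXECUTE.
      System.err.println("denied: " + ace.getMessage());
    }
  }
}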
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f1250dd..f745a6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -195,6 +195,38 @@ class FSPermissionChecker implements AccessControlEnforcer {
ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
}
+ /**
+ * Check permission only for the given inode (not checking the children's
+ * access).
+ *
+ * @param inode the inode to check.
+ * @param snapshotId the snapshot id.
+ * @param access the target access.
+ * @throws AccessControlException
+ */
+ void checkPermission(INode inode, int snapshotId, FsAction access)
+ throws AccessControlException {
+ try {
+ byte[][] localComponents = {inode.getLocalNameBytes()};
+ INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
+ AccessControlEnforcer enforcer = getAccessControlEnforcer();
+ enforcer.checkPermission(
+ fsOwner, supergroup, callerUgi,
+ iNodeAttr, // single inode attr in the array
+ new INode[]{inode}, // single inode in the array
+ localComponents, snapshotId,
+ null, -1, // this will skip checkTraverse() because
+ // not checking ancestor here
+ false, null, null,
+ access, // the target access to be checked against the inode
+ null, // passing null sub access avoids checking children
+ false);
+ } catch (AccessControlException ace) {
+ throw new AccessControlException(
+ toAccessControlString(inode, inode.getFullPathName(), access));
+ }
+ }
+
@Override
public void checkPermission(String fsOwner, String supergroup,
UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 1f982ca..d768e08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.Diff;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ChunkedArrayList;
import org.apache.hadoop.util.StringUtils;
@@ -418,7 +419,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
/** Compute {@link ContentSummary}. Blocking call */
- public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
+ public final ContentSummary computeContentSummary(
+ BlockStoragePolicySuite bsps) throws AccessControlException {
return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
new ContentSummaryComputationContext(bsps));
}
@@ -427,7 +429,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* Compute {@link ContentSummary}.
*/
public final ContentSummary computeAndConvertContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) {
+ ContentSummaryComputationContext summary) throws AccessControlException {
computeContentSummary(snapshotId, summary);
final ContentCounts counts = summary.getCounts();
final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -461,7 +463,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* @return The same objects as summary.
*/
public abstract ContentSummaryComputationContext computeContentSummary(
- int snapshotId, ContentSummaryComputationContext summary);
+ int snapshotId, ContentSummaryComputationContext summary)
+ throws AccessControlException;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 4012783..3b7fa4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,6 +26,7 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
@@ -632,7 +634,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
@Override
public ContentSummaryComputationContext computeContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) {
+ ContentSummaryComputationContext summary) throws AccessControlException {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
final ContentCounts counts = new ContentCounts.Builder().build();
@@ -654,7 +656,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
protected ContentSummaryComputationContext computeDirectoryContentSummary(
- ContentSummaryComputationContext summary, int snapshotId) {
+ ContentSummaryComputationContext summary, int snapshotId)
+ throws AccessControlException {
+ // throws an exception if the permission check fails
+ summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
// Explicit traversing is done to enable repositioning after relinquishing
// and reacquiring locks.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 1b85237..db2026d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
/**
* An anonymous reference to an inode.
@@ -314,7 +315,7 @@ public abstract class INodeReference extends INode {
@Override
public ContentSummaryComputationContext computeContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) {
+ ContentSummaryComputationContext summary) throws AccessControlException {
return referred.computeContentSummary(snapshotId, summary);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index fbfc278..0ab928d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
@@ -234,7 +235,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
@Override
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
- final ContentCounts counts) {
+ final ContentCounts counts) throws AccessControlException {
counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
super.computeContentSummary4Snapshot(bsps, counts);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 0111b3b..7535879 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.security.AccessControlException;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
@@ -630,7 +631,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
- final ContentCounts counts) {
+ final ContentCounts counts) throws AccessControlException {
// Create a new blank summary context for blocking processing of subtree.
ContentSummaryComputationContext summary =
new ContentSummaryComputationContext(bsps);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index e98e766..515f164 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
+import org.apache.hadoop.security.AccessControlException;
/** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private
@@ -176,7 +177,8 @@ public class Snapshot implements Comparable<byte[]> {
@Override
public ContentSummaryComputationContext computeContentSummary(
- int snapshotId, ContentSummaryComputationContext summary) {
+ int snapshotId, ContentSummaryComputationContext summary)
+ throws AccessControlException {
return computeDirectoryContentSummary(summary, snapshotId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
new file mode 100644
index 0000000..03aa440
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.security.PrivilegedExceptionAction;
+
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * This class tests get content summary with permission settings.
+ */
+public class TestGetContentSummaryWithPermission {
+ protected static final short REPLICATION = 3;
+ protected static final long BLOCKSIZE = 1024;
+
+ private Configuration conf;
+ private MiniDFSCluster cluster;
+ private DistributedFileSystem dfs;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+ cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
+ cluster.waitActive();
+
+ dfs = cluster.getFileSystem();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ /**
+ * Test getContentSummary for the superuser. The superuser is always
+ * allowed access, whatever permissions the directories carry.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testGetContentSummarySuperUser() throws Exception {
+ final Path foo = new Path("/fooSuper");
+ final Path bar = new Path(foo, "barSuper");
+ final Path baz = new Path(bar, "bazSuper");
+ dfs.mkdirs(bar);
+ DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+ ContentSummary summary;
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+
+ dfs.setPermission(foo, new FsPermission((short)0));
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+
+ dfs.setPermission(bar, new FsPermission((short)0));
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+
+ dfs.setPermission(baz, new FsPermission((short)0));
+
+ summary = cluster.getNameNodeRpc().getContentSummary(
+ foo.toString());
+ verifySummary(summary, 2, 1, 10);
+ }
+
+ /**
+ * Test getContentSummary for a non-super, non-owner user. Such users are
+ * restricted by the permissions of subdirectories: if any subdirectory does
+ * not grant READ_EXECUTE access, an AccessControlException is thrown.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testGetContentSummaryNonSuperUser() throws Exception {
+ final Path foo = new Path("/fooNoneSuper");
+ final Path bar = new Path(foo, "barNoneSuper");
+ final Path baz = new Path(bar, "bazNoneSuper");
+ // run as some random non-superuser, non-owner user.
+ final UserGroupInformation userUgi =
+ UserGroupInformation.createUserForTesting(
+ "randomUser", new String[]{"randomGroup"});
+ dfs.mkdirs(bar);
+ DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
+
+ // by default, directory permission is rwxr-xr-x; as long as READ and
+ // EXECUTE are set, the content summary should be accessible
+ FileStatus fileStatus;
+ fileStatus = dfs.getFileStatus(foo);
+ assertEquals((short)755, fileStatus.getPermission().toOctal());
+ fileStatus = dfs.getFileStatus(bar);
+ assertEquals((short)755, fileStatus.getPermission().toOctal());
+ // the file has no EXECUTE bit; it is rw-r--r-- by default
+ fileStatus = dfs.getFileStatus(baz);
+ assertEquals((short)644, fileStatus.getPermission().toOctal());
+
+ // by default, can get content summary
+ ContentSummary summary =
+ userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ verifySummary(summary, 2, 1, 10);
+
+ // set empty access on root dir, should disallow content summary
+ dfs.setPermission(foo, new FsPermission((short)0));
+ try {
+ userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ fail("Should've fail due to access control exception.");
+ } catch (AccessControlException e) {
+ assertTrue(e.getMessage().contains("Permission denied"));
+ }
+
+ // restore foo's permission to allow READ_EXECUTE
+ dfs.setPermission(foo,
+ new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+ // set empty access on subdir, should disallow content summary from root dir
+ dfs.setPermission(bar, new FsPermission((short)0));
+
+ try {
+ userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ fail("Should've fail due to access control exception.");
+ } catch (AccessControlException e) {
+ assertTrue(e.getMessage().contains("Permission denied"));
+ }
+
+ // restore the permission of the subdir to READ_EXECUTE, enabling
+ // getContentSummary again from the root
+ dfs.setPermission(bar,
+ new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
+
+ summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ verifySummary(summary, 2, 1, 10);
+
+ // permissions of files under the directory do not affect
+ // getContentSummary
+ dfs.setPermission(baz, new FsPermission((short)0));
+ summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
+ () -> cluster.getNameNodeRpc().getContentSummary(
+ foo.toString()));
+ verifySummary(summary, 2, 1, 10);
+ }
+
+ private void verifySummary(ContentSummary summary, int dirCount,
+ int fileCount, int length) {
+ assertEquals(dirCount, summary.getDirectoryCount());
+ assertEquals(fileCount, summary.getFileCount());
+ assertEquals(length, summary.getLength());
+ }
+
+}
[17/50] [abbrv] hadoop git commit: HDFS-12137. DN dataset lock should
be fair. Contributed by Daryn Sharp.
Posted by xy...@apache.org.
HDFS-12137. DN dataset lock should be fair. Contributed by Daryn Sharp.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d86a939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d86a939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d86a939
Branch: refs/heads/HDFS-7240
Commit: 8d86a93915ee00318289535d9c78e48b75c8359d
Parents: a29fe10
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Jul 14 15:41:43 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Jul 14 15:41:43 2017 -0500
----------------------------------------------------------------------
.../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d86a939/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 208d554..2544ff5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -39,6 +39,7 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.TimeUnit;
import javax.management.NotCompliantMBeanException;
@@ -270,6 +271,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
this.datasetLock = new AutoCloseableLock(
new InstrumentedLock(getClass().getName(), LOG,
+ new ReentrantLock(true),
conf.getTimeDuration(
DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
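The entire fix is the new ReentrantLock(true) argument: a fair lock grants access in arrival order instead of letting late arrivals barge in ahead of waiting threads. A JDK-only sketch of the distinction follows; the thread names and iteration count are illustrative, not taken from the DataNode code.

import java.util.concurrent.locks.ReentrantLock;

public class FairLockSketch {
  public static void main(String[] args) throws InterruptedException {
    // true = fair: the longest-waiting thread acquires next, so no single
    // caller can starve the others, which is the behavior this commit wants
    // for the DN dataset lock. The default (false) permits barging, which
    // gives higher throughput but no ordering guarantee.
    ReentrantLock lock = new ReentrantLock(true);
    Runnable task = () -> {
      for (int i = 0; i < 3; i++) {
        lock.lock();
        try {
          System.out.println(Thread.currentThread().getName() + " holds lock");
        } finally {
          lock.unlock();
        }
      }
    };
    Thread a = new Thread(task, "writer");
    Thread b = new Thread(task, "scanner");
    a.start(); b.start();
    a.join(); b.join();
  }
}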
[09/50] [abbrv] hadoop git commit: Revert "YARN-6805. NPE in
LinuxContainerExecutor due to null PrivilegedOperationException exit code.
Contributed by Jason Lowe"
Posted by xy...@apache.org.
Revert "YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe"
This reverts commit f76f5c0919cdb0b032edb309d137093952e77268.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ffca5d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ffca5d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ffca5d3
Branch: refs/heads/HDFS-7240
Commit: 0ffca5d347df0acb1979dff7a07ae88ea834adc7
Parents: f76f5c0
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Thu Jul 13 17:42:38 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Thu Jul 13 17:42:38 2017 -0500
----------------------------------------------------------------------
.../nodemanager/LinuxContainerExecutor.java | 19 ++---
.../PrivilegedOperationException.java | 10 +--
.../runtime/ContainerExecutionException.java | 10 +--
.../TestLinuxContainerExecutorWithMocks.java | 89 --------------------
4 files changed, 17 insertions(+), 111 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ffca5d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 47b99c2..9a3b2d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,10 +275,6 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
}
- protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
- return PrivilegedOperationExecutor.getInstance(getConf());
- }
-
@Override
public void init() throws IOException {
Configuration conf = super.getConf();
@@ -289,7 +285,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
PrivilegedOperation checkSetupOp = new PrivilegedOperation(
PrivilegedOperation.OperationType.CHECK_SETUP);
PrivilegedOperationExecutor privilegedOperationExecutor =
- getPrivilegedOperationExecutor();
+ PrivilegedOperationExecutor.getInstance(conf);
privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
false);
@@ -386,7 +382,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
Configuration conf = super.getConf();
PrivilegedOperationExecutor privilegedOperationExecutor =
- getPrivilegedOperationExecutor();
+ PrivilegedOperationExecutor.getInstance(conf);
privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
initializeContainerOp, null, null, false, true);
@@ -534,9 +530,8 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
builder.append("Stack trace: "
+ StringUtils.stringifyException(e) + "\n");
- String output = e.getOutput();
- if (output!= null && !e.getOutput().isEmpty()) {
- builder.append("Shell output: " + output + "\n");
+ if (!e.getOutput().isEmpty()) {
+ builder.append("Shell output: " + e.getOutput() + "\n");
}
String diagnostics = builder.toString();
logOutput(diagnostics);
@@ -734,7 +729,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
Configuration conf = super.getConf();
PrivilegedOperationExecutor privilegedOperationExecutor =
- getPrivilegedOperationExecutor();
+ PrivilegedOperationExecutor.getInstance(conf);
privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
false);
@@ -764,7 +759,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
PrivilegedOperationExecutor privOpExecutor =
- getPrivilegedOperationExecutor();
+ PrivilegedOperationExecutor.getInstance(super.getConf());
String results =
privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -823,7 +818,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
mountCGroupsOp.appendArgs(cgroupKVs);
PrivilegedOperationExecutor privilegedOperationExecutor =
- getPrivilegedOperationExecutor();
+ PrivilegedOperationExecutor.getInstance(conf);
privilegedOperationExecutor.executePrivilegedOperation(mountCGroupsOp,
false);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ffca5d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
index 9a11194..3622489 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
public class PrivilegedOperationException extends YarnException {
private static final long serialVersionUID = 1L;
- private int exitCode = -1;
+ private Integer exitCode;
private String output;
private String errorOutput;
@@ -36,7 +36,7 @@ public class PrivilegedOperationException extends YarnException {
super(message);
}
- public PrivilegedOperationException(String message, int exitCode,
+ public PrivilegedOperationException(String message, Integer exitCode,
String output, String errorOutput) {
super(message);
this.exitCode = exitCode;
@@ -48,8 +48,8 @@ public class PrivilegedOperationException extends YarnException {
super(cause);
}
- public PrivilegedOperationException(Throwable cause, int exitCode,
- String output, String errorOutput) {
+ public PrivilegedOperationException(Throwable cause, Integer exitCode, String
+ output, String errorOutput) {
super(cause);
this.exitCode = exitCode;
this.output = output;
@@ -59,7 +59,7 @@ public class PrivilegedOperationException extends YarnException {
super(message, cause);
}
- public int getExitCode() {
+ public Integer getExitCode() {
return exitCode;
}
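Context for the revert above: with getExitCode() returning a boxed Integer that may be null again, any caller that unboxes it blindly can hit exactly the NullPointerException that YARN-6805 originally targeted. A standalone JDK-only illustration (not YARN code; the method name merely stands in for the exception's accessor):

public class NullExitCodeSketch {
  // Stands in for PrivilegedOperationException#getExitCode() returning a
  // boxed Integer that was never assigned.
  static Integer getExitCode() { return null; }

  public static void main(String[] args) {
    Integer code = getExitCode();
    // Safe pattern: null-check before comparing or unboxing.
    if (code != null && code != 0) {
      System.out.println("operation failed, exit code " + code);
    }
    // Hazardous pattern: auto-unboxing null throws NullPointerException.
    int raw = getExitCode(); // NPE at runtime
    System.out.println(raw);
  }
}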
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ffca5d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
index 3147277..1fbece2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
@@ -32,10 +32,10 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
@InterfaceStability.Unstable
public class ContainerExecutionException extends YarnException {
private static final long serialVersionUID = 1L;
- private static final int EXIT_CODE_UNSET = -1;
+ private static final Integer EXIT_CODE_UNSET = -1;
private static final String OUTPUT_UNSET = "<unknown>";
- private int exitCode;
+ private Integer exitCode;
private String output;
private String errorOutput;
@@ -54,7 +54,7 @@ public class ContainerExecutionException extends YarnException {
}
- public ContainerExecutionException(String message, int exitCode, String
+ public ContainerExecutionException(String message, Integer exitCode, String
output, String errorOutput) {
super(message);
this.exitCode = exitCode;
@@ -62,7 +62,7 @@ public class ContainerExecutionException extends YarnException {
this.errorOutput = errorOutput;
}
- public ContainerExecutionException(Throwable cause, int exitCode, String
+ public ContainerExecutionException(Throwable cause, Integer exitCode, String
output, String errorOutput) {
super(cause);
this.exitCode = exitCode;
@@ -70,7 +70,7 @@ public class ContainerExecutionException extends YarnException {
this.errorOutput = errorOutput;
}
- public int getExitCode() {
+ public Integer getExitCode() {
return exitCode;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ffca5d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index cfd0e36..07134e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -23,9 +23,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
@@ -42,7 +40,6 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
-import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -50,8 +47,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -59,7 +54,6 @@ import org.apache.hadoop.yarn.exceptions.ConfigurationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
@@ -522,87 +516,4 @@ public class TestLinuxContainerExecutorWithMocks {
appSubmitter, cmd, "", baseDir0.toString(), baseDir1.toString()),
readMockParams());
}
-
- @Test
- public void testNoExitCodeFromPrivilegedOperation() throws Exception {
- Configuration conf = new Configuration();
- final PrivilegedOperationExecutor spyPrivilegedExecutor =
- spy(PrivilegedOperationExecutor.getInstance(conf));
- doThrow(new PrivilegedOperationException("interrupted"))
- .when(spyPrivilegedExecutor).executePrivilegedOperation(
- any(List.class), any(PrivilegedOperation.class),
- any(File.class), any(Map.class), anyBoolean(), anyBoolean());
- LinuxContainerRuntime runtime = new DefaultLinuxContainerRuntime(
- spyPrivilegedExecutor);
- runtime.initialize(conf);
- mockExec = new LinuxContainerExecutor(runtime);
- mockExec.setConf(conf);
- LinuxContainerExecutor lce = new LinuxContainerExecutor(runtime) {
- @Override
- protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
- return spyPrivilegedExecutor;
- }
- };
- lce.setConf(conf);
- InetSocketAddress address = InetSocketAddress.createUnresolved(
- "localhost", 8040);
- Path nmPrivateCTokensPath= new Path("file:///bin/nmPrivateCTokensPath");
- LocalDirsHandlerService dirService = new LocalDirsHandlerService();
- dirService.init(conf);
-
- String appSubmitter = "nobody";
- ApplicationId appId = ApplicationId.newInstance(1, 1);
- ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
- ContainerId cid = ContainerId.newContainerId(attemptId, 1);
- HashMap<String, String> env = new HashMap<>();
- Container container = mock(Container.class);
- ContainerLaunchContext context = mock(ContainerLaunchContext.class);
- when(container.getContainerId()).thenReturn(cid);
- when(container.getLaunchContext()).thenReturn(context);
- when(context.getEnvironment()).thenReturn(env);
- Path workDir = new Path("/tmp");
-
- try {
- lce.startLocalizer(new LocalizerStartContext.Builder()
- .setNmPrivateContainerTokens(nmPrivateCTokensPath)
- .setNmAddr(address)
- .setUser(appSubmitter)
- .setAppId(appId.toString())
- .setLocId("12345")
- .setDirsHandler(dirService)
- .build());
- Assert.fail("startLocalizer should have thrown an exception");
- } catch (IOException e) {
- assertTrue("Unexpected exception " + e,
- e.getMessage().contains("exitCode"));
- }
-
- lce.activateContainer(cid, new Path(workDir, "pid.txt"));
- lce.launchContainer(new ContainerStartContext.Builder()
- .setContainer(container)
- .setNmPrivateContainerScriptPath(new Path("file:///bin/echo"))
- .setNmPrivateTokensPath(new Path("file:///dev/null"))
- .setUser(appSubmitter)
- .setAppId(appId.toString())
- .setContainerWorkDir(workDir)
- .setLocalDirs(dirsHandler.getLocalDirs())
- .setLogDirs(dirsHandler.getLogDirs())
- .setFilecacheDirs(new ArrayList<>())
- .setUserLocalDirs(new ArrayList<>())
- .setContainerLocalDirs(new ArrayList<>())
- .setContainerLogDirs(new ArrayList<>())
- .build());
- lce.deleteAsUser(new DeletionAsUserContext.Builder()
- .setUser(appSubmitter)
- .setSubDir(new Path("/tmp/testdir"))
- .build());
-
- try {
- lce.mountCgroups(new ArrayList<String>(), "hierarchy");
- Assert.fail("mountCgroups should have thrown an exception");
- } catch (IOException e) {
- assertTrue("Unexpected exception " + e,
- e.getMessage().contains("exit code"));
- }
- }
}
[11/50] [abbrv] hadoop git commit: MAPREDUCE-6910.
MapReduceTrackingUriPlugin can not return the right URI of history server
with HTTPS. Contributed by Lantao Jin
MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of history server with HTTPS. Contributed by Lantao Jin
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43f05032
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43f05032
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43f05032
Branch: refs/heads/HDFS-7240
Commit: 43f0503286eccbc6bb8ae77584b635bfd0c48e50
Parents: ebc048c
Author: Ravi Prakash <ra...@altiscale.com>
Authored: Thu Jul 13 16:16:45 2017 -0700
Committer: Ravi Prakash <ra...@altiscale.com>
Committed: Thu Jul 13 16:16:45 2017 -0700
----------------------------------------------------------------------
.../hadoop/mapreduce/v2/util/MRWebAppUtil.java | 9 ++++---
.../webapp/TestMapReduceTrackingUriPlugin.java | 26 ++++++++++++++++++--
2 files changed, 29 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f05032/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
index d367060..951c9d5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
import java.net.InetAddress;
import java.net.InetSocketAddress;
@@ -76,7 +75,9 @@ public class MRWebAppUtil {
: "http://";
}
- public static String getJHSWebappScheme() {
+ public static String getJHSWebappScheme(Configuration conf) {
+ setHttpPolicyInJHS(conf.get(JHAdminConfig.MR_HS_HTTP_POLICY,
+ JHAdminConfig.DEFAULT_MR_HS_HTTP_POLICY));
return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://"
: "http://";
}
@@ -101,7 +102,7 @@ public class MRWebAppUtil {
}
public static String getJHSWebappURLWithScheme(Configuration conf) {
- return getJHSWebappScheme() + getJHSWebappURLWithoutScheme(conf);
+ return getJHSWebappScheme(conf) + getJHSWebappURLWithoutScheme(conf);
}
public static InetSocketAddress getJHSWebBindAddress(Configuration conf) {
@@ -153,7 +154,7 @@ public class MRWebAppUtil {
public static String getApplicationWebURLOnJHSWithScheme(Configuration conf,
ApplicationId appId) throws UnknownHostException {
- return getJHSWebappScheme()
+ return getJHSWebappScheme(conf)
+ getApplicationWebURLOnJHSWithoutScheme(conf, appId);
}
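For reference, a minimal sketch of the fixed behaviour from a caller's point of view. The class name and address value are illustrative; the configuration keys and the MRWebAppUtil/HttpConfig calls are the ones this patch touches:

import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class JhsSchemeSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Ask the history server for HTTPS and point at its HTTPS address.
    conf.set(JHAdminConfig.MR_HS_HTTP_POLICY,
        HttpConfig.Policy.HTTPS_ONLY.name());
    conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
        "example.net:404040");
    // With this patch the scheme is re-read from the passed-in conf on
    // every call, so this prints a URL starting with "https://".
    System.out.println(MRWebAppUtil.getJHSWebappURLWithScheme(conf));
  }
}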
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f05032/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
index 8c3be58..9291097 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
import java.net.URI;
import java.net.URISyntaxException;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -30,17 +31,38 @@ import org.junit.Test;
public class TestMapReduceTrackingUriPlugin {
@Test
- public void testProducesHistoryServerUriForAppId() throws URISyntaxException {
+ public void testProducesHistoryServerUriForAppId()
+ throws URISyntaxException {
final String historyAddress = "example.net:424242";
YarnConfiguration conf = new YarnConfiguration();
+ conf.set(JHAdminConfig.MR_HS_HTTP_POLICY,
+ HttpConfig.Policy.HTTP_ONLY.name());
conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, historyAddress);
MapReduceTrackingUriPlugin plugin = new MapReduceTrackingUriPlugin();
plugin.setConf(conf);
- ApplicationId id = ApplicationId.newInstance(6384623l, 5);
+ ApplicationId id = ApplicationId.newInstance(6384623L, 5);
String jobSuffix = id.toString().replaceFirst("^application_", "job_");
URI expected =
new URI("http://" + historyAddress + "/jobhistory/job/" + jobSuffix);
URI actual = plugin.getTrackingUri(id);
assertEquals(expected, actual);
}
+
+ @Test
+ public void testProducesHistoryServerUriWithHTTPS()
+ throws URISyntaxException {
+ final String historyAddress = "example.net:404040";
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.set(JHAdminConfig.MR_HS_HTTP_POLICY,
+ HttpConfig.Policy.HTTPS_ONLY.name());
+ conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS, historyAddress);
+ MapReduceTrackingUriPlugin plugin = new MapReduceTrackingUriPlugin();
+ plugin.setConf(conf);
+ ApplicationId id = ApplicationId.newInstance(6384623L, 5);
+ String jobSuffix = id.toString().replaceFirst("^application_", "job_");
+ URI expected =
+ new URI("https://" + historyAddress + "/jobhistory/job/" + jobSuffix);
+ URI actual = plugin.getTrackingUri(id);
+ assertEquals(expected, actual);
+ }
}
\ No newline at end of file
[45/50] [abbrv] hadoop git commit: HDFS-12158. Secondary Namenode's
web interface lack configs for X-FRAME-OPTIONS protection. Contributed by
Mukul Kumar Singh.
HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS protection. Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/413b23eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/413b23eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/413b23eb
Branch: refs/heads/HDFS-7240
Commit: 413b23eb04eee24275257ab462133e0818f87449
Parents: 04ff412
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Jul 19 10:29:06 2017 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jul 19 10:29:06 2017 -0700
----------------------------------------------------------------------
.../hdfs/server/namenode/SecondaryNameNode.java | 10 +++++++++
.../namenode/TestNameNodeHttpServerXFrame.java | 22 ++++++++++++++++++++
2 files changed, 32 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/413b23eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 6dd085a..ff83e34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -479,6 +479,16 @@ public class SecondaryNameNode implements Runnable,
DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+ final boolean xFrameEnabled = conf.getBoolean(
+ DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+ DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+ final String xFrameOptionValue = conf.getTrimmed(
+ DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+ DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+ builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
infoServer = builder.build();
infoServer.setAttribute("secondary.name.node", this);
infoServer.setAttribute("name.system.image", checkpointImage);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/413b23eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
index 947e951..aaa713e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.http.HttpServer2;
@@ -32,6 +33,7 @@ import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
+import java.net.URI;
/**
* A class to test the XFrameoptions of Namenode HTTP Server. We are not reusing
@@ -94,4 +96,24 @@ public class TestNameNodeHttpServerXFrame {
conn.connect();
return conn;
}
+
+ @Test
+ public void testSecondaryNameNodeXFrame() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+
+ SecondaryNameNode sn = new SecondaryNameNode(conf);
+ sn.startInfoServer();
+ InetSocketAddress httpAddress = SecondaryNameNode.getHttpAddress(conf);
+
+ URL url = URI.create("http://" + httpAddress.getHostName()
+ + ":" + httpAddress.getPort()).toURL();
+ HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.connect();
+ String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
+ Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
+ xfoHeader != null);
+ Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+ .SAMEORIGIN.toString()));
+ }
}
[23/50] [abbrv] hadoop git commit: HADOOP-14521. KMS client needs
retry logic. Contributed by Rushabh S Shah.
HADOOP-14521. KMS client needs retry logic. Contributed by Rushabh S Shah.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a6d5c0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a6d5c0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a6d5c0c
Branch: refs/heads/HDFS-7240
Commit: 0a6d5c0cf1d963da9131aa12326fc576f0e92d2c
Parents: f413ee3
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Jul 14 22:14:29 2017 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Jul 14 22:22:53 2017 -0700
----------------------------------------------------------------------
.../crypto/key/kms/KMSClientProvider.java | 39 ++-
.../key/kms/LoadBalancingKMSClientProvider.java | 78 ++++-
.../fs/CommonConfigurationKeysPublic.java | 29 ++
.../src/main/resources/core-default.xml | 28 ++
.../kms/TestLoadBalancingKMSClientProvider.java | 315 ++++++++++++++++++-
.../hadoop/hdfs/TestEncryptionZonesWithKMS.java | 19 +-
6 files changed, 464 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index b3abd0c..20ad58c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -309,9 +309,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
* - HOSTNAME = string
* - PORT = integer
*
- * If multiple hosts are provider, the Factory will create a
- * {@link LoadBalancingKMSClientProvider} that round-robins requests
- * across the provided list of hosts.
+ * This will always create a {@link LoadBalancingKMSClientProvider}
+ * if the uri is correct.
*/
@Override
public KeyProvider createProvider(URI providerUri, Configuration conf)
@@ -338,30 +337,26 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
}
hostsPart = t[0];
}
- return createProvider(providerUri, conf, origUrl, port, hostsPart);
+ return createProvider(conf, origUrl, port, hostsPart);
}
return null;
}
- private KeyProvider createProvider(URI providerUri, Configuration conf,
+ private KeyProvider createProvider(Configuration conf,
URL origUrl, int port, String hostsPart) throws IOException {
String[] hosts = hostsPart.split(";");
- if (hosts.length == 1) {
- return new KMSClientProvider(providerUri, conf);
- } else {
- KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
- for (int i = 0; i < hosts.length; i++) {
- try {
- providers[i] =
- new KMSClientProvider(
- new URI("kms", origUrl.getProtocol(), hosts[i], port,
- origUrl.getPath(), null, null), conf);
- } catch (URISyntaxException e) {
- throw new IOException("Could not instantiate KMSProvider..", e);
- }
+ KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
+ for (int i = 0; i < hosts.length; i++) {
+ try {
+ providers[i] =
+ new KMSClientProvider(
+ new URI("kms", origUrl.getProtocol(), hosts[i], port,
+ origUrl.getPath(), null, null), conf);
+ } catch (URISyntaxException e) {
+ throw new IOException("Could not instantiate KMSProvider.", e);
}
- return new LoadBalancingKMSClientProvider(providers, conf);
}
+ return new LoadBalancingKMSClientProvider(providers, conf);
}
}
@@ -1078,7 +1073,11 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
- throw new IOException(e);
+ if (e instanceof IOException) {
+ throw (IOException) e;
+ } else {
+ throw new IOException(e);
+ }
}
}
return tokens;
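One consequence worth calling out: even a single-host kms:// URI now comes back wrapped. A minimal sketch (mirroring the updated factory test further down) of what a caller observes; the class name and host are illustrative:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;

public class KmsFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // One host or many, the factory now always returns the load-balancing
    // wrapper, so the new retry policy applies uniformly.
    KeyProvider kp = new KMSClientProvider.Factory().createProvider(
        new URI("kms://http@host1/kms/foo"), conf);
    System.out.println(kp instanceof LoadBalancingKMSClientProvider); // true
  }
}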
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index de9c988..6b20c99 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.crypto.key.kms;
import java.io.IOException;
+import java.io.InterruptedIOException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
@@ -31,9 +32,13 @@ import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -69,6 +74,8 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
private final KMSClientProvider[] providers;
private final AtomicInteger currentIdx;
+ private RetryPolicy retryPolicy = null;
+
public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
Configuration conf) {
this(shuffle(providers), Time.monotonicNow(), conf);
@@ -80,24 +87,79 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
super(conf);
this.providers = providers;
this.currentIdx = new AtomicInteger((int)(seed % providers.length));
+ int maxNumRetries = conf.getInt(CommonConfigurationKeysPublic.
+ KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length);
+ int sleepBaseMillis = conf.getInt(CommonConfigurationKeysPublic.
+ KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY,
+ CommonConfigurationKeysPublic.
+ KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT);
+ int sleepMaxMillis = conf.getInt(CommonConfigurationKeysPublic.
+ KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY,
+ CommonConfigurationKeysPublic.
+ KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT);
+ Preconditions.checkState(maxNumRetries >= 0);
+ Preconditions.checkState(sleepBaseMillis >= 0);
+ Preconditions.checkState(sleepMaxMillis >= 0);
+ this.retryPolicy = RetryPolicies.failoverOnNetworkException(
+ RetryPolicies.TRY_ONCE_THEN_FAIL, maxNumRetries, 0, sleepBaseMillis,
+ sleepMaxMillis);
}
@VisibleForTesting
- KMSClientProvider[] getProviders() {
+ public KMSClientProvider[] getProviders() {
return providers;
}
private <T> T doOp(ProviderCallable<T> op, int currPos)
throws IOException {
+ if (providers.length == 0) {
+ throw new IOException("No providers configured !");
+ }
IOException ex = null;
- for (int i = 0; i < providers.length; i++) {
+ int numFailovers = 0;
+ for (int i = 0;; i++, numFailovers++) {
KMSClientProvider provider = providers[(currPos + i) % providers.length];
try {
return op.call(provider);
+ } catch (AccessControlException ace) {
+ // No need to retry on AccessControlException
+ // and AuthorizationException.
+ // This assumes all the servers are configured with identical
+ // permissions and identical key acls.
+ throw ace;
} catch (IOException ioe) {
- LOG.warn("KMS provider at [{}] threw an IOException!! {}",
- provider.getKMSUrl(), StringUtils.stringifyException(ioe));
+ LOG.warn("KMS provider at [{}] threw an IOException: ",
+ provider.getKMSUrl(), ioe);
ex = ioe;
+
+ RetryAction action = null;
+ try {
+ action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
+ } catch (Exception e) {
+ if (e instanceof IOException) {
+ throw (IOException)e;
+ }
+ throw new IOException(e);
+ }
+ if (action.action == RetryAction.RetryDecision.FAIL) {
+ LOG.warn("Aborting since the Request has failed with all KMS"
+ + " providers(depending on {}={} setting and numProviders={})"
+ + " in the group OR the exception is not recoverable",
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY,
+ getConf().getInt(
+ CommonConfigurationKeysPublic.
+ KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length),
+ providers.length);
+ throw ex;
+ }
+ if (((numFailovers + 1) % providers.length) == 0) {
+ // Sleep only after we try all the providers for every cycle.
+ try {
+ Thread.sleep(action.delayMillis);
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException("Thread Interrupted");
+ }
+ }
} catch (Exception e) {
if (e instanceof RuntimeException) {
throw (RuntimeException)e;
@@ -106,12 +168,6 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
}
}
}
- if (ex != null) {
- LOG.warn("Aborting since the Request has failed with all KMS"
- + " providers in the group. !!");
- throw ex;
- }
- throw new IOException("No providers configured !!");
}
private int nextIdx() {
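The retry decision itself is delegated to the stock RetryPolicies machinery; a minimal sketch of the policy that doOp consults on each IOException, with the three tunables hard-coded for illustration:

import java.net.ConnectException;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class KmsRetryPolicySketch {
  public static void main(String[] args) throws Exception {
    int maxNumRetries = 3;      // defaults to the number of providers
    int sleepBaseMillis = 100;  // hadoop.security.kms.client.failover.sleep.base.millis
    int sleepMaxMillis = 2000;  // hadoop.security.kms.client.failover.sleep.max.millis
    // Same construction as the patch: fail over on network exceptions,
    // fall back to TRY_ONCE_THEN_FAIL for everything else.
    RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, maxNumRetries, 0,
        sleepBaseMillis, sleepMaxMillis);
    // A connect failure after one failover yields FAILOVER_AND_RETRY
    // plus a backoff delay.
    RetryPolicy.RetryAction action = policy.shouldRetry(
        new ConnectException("kms host down"), 0, 1, false);
    System.out.println(action.action + " after " + action.delayMillis + " ms");
  }
}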
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index b580f64..e8d4b4c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -724,6 +724,35 @@ public class CommonConfigurationKeysPublic {
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
+ /** Default value is the number of providers specified. */
+ public static final String KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY =
+ "hadoop.security.kms.client.failover.max.retries";
+
+ /**
+ * @see
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ * core-default.xml</a>
+ */
+ public static final String KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY =
+ "hadoop.security.kms.client.failover.sleep.base.millis";
+ /** Default value is 100 ms. */
+ public static final int KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT = 100;
+
+ /**
+ * @see
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ * core-default.xml</a>
+ */
+ public static final String KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY =
+ "hadoop.security.kms.client.failover.sleep.max.millis";
+ /** Default value is 2 secs. */
+ public static final int KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT = 2000;
+
+ /**
+ * @see
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ * core-default.xml</a>
+ */
public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY =
"hadoop.security.java.secure.random.algorithm";
/** Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 7cfb072..a705a4e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2288,6 +2288,34 @@
</description>
</property>
+<property>
+ <name>hadoop.security.kms.client.failover.sleep.base.millis</name>
+ <value>100</value>
+ <description>
+ Expert only. The time to wait, in milliseconds, between failover
+ attempts increases exponentially as a function of the number of
+ attempts made so far, with a random factor of +/- 50%. This option
+ specifies the base value used in the failover calculation. The
+ first failover will retry immediately. The 2nd failover attempt
+ will delay at least hadoop.security.kms.client.failover.sleep.base.millis
+ milliseconds. And so on.
+ </description>
+</property>
+
+<property>
+ <name>hadoop.security.kms.client.failover.sleep.max.millis</name>
+ <value>2000</value>
+ <description>
+ Expert only. The time to wait, in milliseconds, between failover
+ attempts increases exponentially as a function of the number of
+ attempts made so far, with a random factor of +/- 50%. This option
+ specifies the maximum value to wait between failovers.
+ Specifically, the time between two failover attempts will not
+ exceed +/- 50% of hadoop.security.kms.client.failover.sleep.max.millis
+ milliseconds.
+ </description>
+</property>
+
<property>
<name>ipc.server.max.connections</name>
<value>0</value>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index d14dd59..2c19722 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -23,9 +23,12 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.verify;
import java.io.IOException;
+import java.net.NoRouteToHostException;
import java.net.URI;
+import java.net.UnknownHostException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
@@ -33,6 +36,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.junit.Test;
@@ -47,14 +53,17 @@ public class TestLoadBalancingKMSClientProvider {
Configuration conf = new Configuration();
KeyProvider kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1/kms/foo"), conf);
- assertTrue(kp instanceof KMSClientProvider);
- assertEquals("http://host1/kms/foo/v1/",
- ((KMSClientProvider) kp).getKMSUrl());
+ assertTrue(kp instanceof LoadBalancingKMSClientProvider);
+ KMSClientProvider[] providers =
+ ((LoadBalancingKMSClientProvider) kp).getProviders();
+ assertEquals(1, providers.length);
+ assertEquals(Sets.newHashSet("http://host1/kms/foo/v1/"),
+ Sets.newHashSet(providers[0].getKMSUrl()));
kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1;host2;host3/kms/foo"), conf);
assertTrue(kp instanceof LoadBalancingKMSClientProvider);
- KMSClientProvider[] providers =
+ providers =
((LoadBalancingKMSClientProvider) kp).getProviders();
assertEquals(3, providers.length);
assertEquals(Sets.newHashSet("http://host1/kms/foo/v1/",
@@ -122,7 +131,7 @@ public class TestLoadBalancingKMSClientProvider {
// This should be retried
KMSClientProvider p4 = mock(KMSClientProvider.class);
when(p4.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new IOException("p4"));
+ .thenThrow(new ConnectTimeoutException("p4"));
when(p4.getKMSUrl()).thenReturn("p4");
KeyProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] { p1, p2, p3, p4 }, 0, conf);
@@ -320,4 +329,298 @@ public class TestLoadBalancingKMSClientProvider {
Mockito.verify(p1, Mockito.times(1)).warmUpEncryptedKeys(keyName);
Mockito.verify(p2, Mockito.times(1)).warmUpEncryptedKeys(keyName);
}
-}
+
+ /**
+ * Tests that retryPolicy fails immediately on encountering an IOException
+ * that is not a SocketException.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesWithIOException() throws Exception {
+ Configuration conf = new Configuration();
+ // Setting total failover attempts to 10.
+ conf.setInt(
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 10);
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.getMetadata(Mockito.anyString()))
+ .thenThrow(new IOException("p1"));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.getMetadata(Mockito.anyString()))
+ .thenThrow(new IOException("p2"));
+ KMSClientProvider p3 = mock(KMSClientProvider.class);
+ when(p3.getMetadata(Mockito.anyString()))
+ .thenThrow(new IOException("p3"));
+
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+ when(p3.getKMSUrl()).thenReturn("p3");
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2, p3}, 0, conf);
+ try {
+ kp.getMetadata("test3");
+ fail("Should fail since all providers threw an IOException");
+ } catch (Exception e) {
+ assertTrue(e instanceof IOException);
+ }
+ verify(kp.getProviders()[0], Mockito.times(1))
+ .getMetadata(Mockito.eq("test3"));
+ verify(kp.getProviders()[1], Mockito.never())
+ .getMetadata(Mockito.eq("test3"));
+ verify(kp.getProviders()[2], Mockito.never())
+ .getMetadata(Mockito.eq("test3"));
+ }
+
+ /**
+ * Tests that client doesn't retry once it encounters AccessControlException
+ * from first provider.
+ * This assumes all the kms servers are configured with identical access to
+ * keys.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesWithAccessControlException() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new AccessControlException("p1"));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new IOException("p2"));
+ KMSClientProvider p3 = mock(KMSClientProvider.class);
+ when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new IOException("p3"));
+
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+ when(p3.getKMSUrl()).thenReturn("p3");
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2, p3}, 0, conf);
+ try {
+ kp.createKey("test3", new Options(conf));
+ fail("Should fail because provider p1 threw an AccessControlException");
+ } catch (Exception e) {
+ assertTrue(e instanceof AccessControlException);
+ }
+ verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p3, Mockito.never()).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ }
+
+ /**
+ * Tests that the client doesn't retry once it encounters a
+ * RuntimeException from the first provider.
+ * This assumes all the kms servers are configured with identical access to
+ * keys.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesWithRuntimeException() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new RuntimeException("p1"));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new IOException("p2"));
+
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2}, 0, conf);
+ try {
+ kp.createKey("test3", new Options(conf));
+ fail("Should fail since provider p1 threw RuntimeException");
+ } catch (Exception e) {
+ assertTrue(e instanceof RuntimeException);
+ }
+ verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ }
+
+ /**
+ * Tests the client retries until it finds a good provider.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesWithTimeoutsException() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 4);
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p1"));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new UnknownHostException("p2"));
+ KMSClientProvider p3 = mock(KMSClientProvider.class);
+ when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new NoRouteToHostException("p3"));
+ KMSClientProvider p4 = mock(KMSClientProvider.class);
+ when(p4.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenReturn(
+ new KMSClientProvider.KMSKeyVersion("test3", "v1", new byte[0]));
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+ when(p3.getKMSUrl()).thenReturn("p3");
+ when(p4.getKMSUrl()).thenReturn("p4");
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2, p3, p4}, 0, conf);
+ try {
+ kp.createKey("test3", new Options(conf));
+ } catch (Exception e) {
+ fail("Provider p4 should have answered the request.");
+ }
+ verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p3, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p4, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ }
+
+ /**
+ * Tests that the operation succeeds on the second attempt after a
+ * ConnectTimeoutException.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesSucceedsSecondTime() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p1"))
+ .thenReturn(new KMSClientProvider.KMSKeyVersion("test3", "v1",
+ new byte[0]));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p2"));
+
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2}, 0, conf);
+ try {
+ kp.createKey("test3", new Options(conf));
+ } catch (Exception e) {
+ fail("Provider p1 should have answered the request second time.");
+ }
+ verify(p1, Mockito.times(2)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ }
+
+ /**
+ * Tests whether retryPolicy retries the specified number of times.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesSpecifiedNumberOfTimes() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 10);
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p1"));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p2"));
+
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2}, 0, conf);
+ try {
+ kp.createKey("test3", new Options(conf));
+ fail("Should fail");
+ } catch (Exception e) {
+ assert (e instanceof ConnectTimeoutException);
+ }
+ verify(p1, Mockito.times(6)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p2, Mockito.times(5)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ }
+
+ /**
+ * Tests that retryPolicy retries as many times as there are providers
+ * when hadoop.security.kms.client.failover.max.retries is not set.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesIfMaxAttemptsNotSet() throws Exception {
+ Configuration conf = new Configuration();
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p1"));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p2"));
+
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2}, 0, conf);
+ try {
+ kp.createKey("test3", new Options(conf));
+ fail("Should fail");
+ } catch (Exception e) {
+ assert (e instanceof ConnectTimeoutException);
+ }
+ verify(p1, Mockito.times(2)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ }
+
+ /**
+ * Tests that client doesn't retry once it encounters AuthenticationException
+ * wrapped in an IOException from first provider.
+ * @throws Exception
+ */
+ @Test
+ public void testClientRetriesWithAuthenticationExceptionWrappedinIOException()
+ throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(
+ CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
+ KMSClientProvider p1 = mock(KMSClientProvider.class);
+ when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new IOException(new AuthenticationException("p1")));
+ KMSClientProvider p2 = mock(KMSClientProvider.class);
+ when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
+ .thenThrow(new ConnectTimeoutException("p2"));
+
+ when(p1.getKMSUrl()).thenReturn("p1");
+ when(p2.getKMSUrl()).thenReturn("p2");
+
+ LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+ new KMSClientProvider[] {p1, p2}, 0, conf);
+ try {
+ kp.createKey("test3", new Options(conf));
+ fail("Should fail since provider p1 threw AuthenticationException");
+ } catch (Exception e) {
+ assertTrue(e.getCause() instanceof AuthenticationException);
+ }
+ verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
+ Mockito.any(Options.class));
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
index 959e724..6f53362 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertTrue;
import com.google.common.base.Supplier;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
@@ -69,14 +70,21 @@ public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
protected void setProvider() {
}
+ private KMSClientProvider getKMSClientProvider() {
+ LoadBalancingKMSClientProvider lbkmscp =
+ (LoadBalancingKMSClientProvider) Whitebox
+ .getInternalState(cluster.getNamesystem().getProvider(), "extension");
+ assert lbkmscp.getProviders().length == 1;
+ return lbkmscp.getProviders()[0];
+ }
+
@Test(timeout = 120000)
public void testCreateEZPopulatesEDEKCache() throws Exception {
final Path zonePath = new Path("/TestEncryptionZone");
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY, NO_TRASH);
@SuppressWarnings("unchecked")
- KMSClientProvider kcp = (KMSClientProvider) Whitebox
- .getInternalState(cluster.getNamesystem().getProvider(), "extension");
+ KMSClientProvider kcp = getKMSClientProvider();
assertTrue(kcp.getEncKeyQueueSize(TEST_KEY) > 0);
}
@@ -110,8 +118,7 @@ public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
dfsAdmin.createEncryptionZone(zonePath, anotherKey, NO_TRASH);
@SuppressWarnings("unchecked")
- KMSClientProvider spy = (KMSClientProvider) Whitebox
- .getInternalState(cluster.getNamesystem().getProvider(), "extension");
+ KMSClientProvider spy = getKMSClientProvider();
assertTrue("key queue is empty after creating encryption zone",
spy.getEncKeyQueueSize(TEST_KEY) > 0);
@@ -122,9 +129,7 @@ public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
- final KMSClientProvider kspy = (KMSClientProvider) Whitebox
- .getInternalState(cluster.getNamesystem().getProvider(),
- "extension");
+ final KMSClientProvider kspy = getKMSClientProvider();
return kspy.getEncKeyQueueSize(TEST_KEY) > 0;
}
}, 1000, 60000);
[43/50] [abbrv] hadoop git commit: HDFS-12067. Correct dfsadmin
commands usage message to reflects IPC port. Contributed by steven-wugang.
HDFS-12067. Correct dfsadmin commands usage message to reflects IPC port. Contributed by steven-wugang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8cd55fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8cd55fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8cd55fe
Branch: refs/heads/HDFS-7240
Commit: f8cd55fe33665faf2d1b14df231516fc891118fc
Parents: df18025
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Jul 19 23:21:43 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Jul 19 23:21:43 2017 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 57 ++++++++++++--------
1 file changed, 34 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8cd55fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 06f408d..ea76093 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1113,29 +1113,39 @@ public class DFSAdmin extends FsShell {
"\tor gets a list of reconfigurable properties.\n" +
"\tThe second parameter specifies the node type\n";
- String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
- "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
- "\ton <hostname:port>. All other args after are sent to the host.\n";
+ String genericRefresh = "-refresh: Arguments are <hostname:ipc_port>" +
+ " <resource_identifier> [arg1..argn]\n" +
+ "\tTriggers a runtime-refresh of the resource specified by " +
+ "<resource_identifier> on <hostname:ipc_port>.\n" +
+ "\tAll other args after are sent to the host.\n" +
+ "\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+ "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
String printTopology = "-printTopology: Print a tree of the racks and their\n" +
"\t\tnodes as reported by the Namenode\n";
- String refreshNamenodes = "-refreshNamenodes: Takes a datanodehost:port as argument,\n"+
- "\t\tFor the given datanode, reloads the configuration files,\n" +
- "\t\tstops serving the removed block-pools\n"+
- "\t\tand starts serving new block-pools\n";
+ String refreshNamenodes = "-refreshNamenodes: Takes a " +
+ "datanodehost:ipc_port as argument. For the given datanode,\n" +
+ "\t\treloads the configuration files, stops serving the removed\n" +
+ "\t\tblock-pools and starts serving new block-pools.\n" +
+ "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address', " +
+ "whose default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
- String getVolumeReport = "-getVolumeReport: Takes a datanodehost:port as "
- + "argument,\n\t\tFor the given datanode, get the volume report\n";
-
- String deleteBlockPool = "-deleteBlockPool: Arguments are datanodehost:port, blockpool id\n"+
- "\t\t and an optional argument \"force\". If force is passed,\n"+
- "\t\t block pool directory for the given blockpool id on the given\n"+
- "\t\t datanode is deleted along with its contents, otherwise\n"+
- "\t\t the directory is deleted only if it is empty. The command\n" +
- "\t\t will fail if datanode is still serving the block pool.\n" +
- "\t\t Refer to refreshNamenodes to shutdown a block pool\n" +
- "\t\t service on a datanode.\n";
+ String getVolumeReport = "-getVolumeReport: Takes a datanodehost:ipc_port" +
+ " as argument. For the given datanode, gets the volume report.\n" +
+ "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address', " +
+ "whose default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
+
+ String deleteBlockPool = "-deleteBlockPool: Arguments are " +
+ "datanodehost:ipc_port, blockpool id and an optional argument\n" +
+ "\t\t\"force\". If force is passed, the block pool directory for\n" +
+ "\t\tthe given blockpool id on the given datanode is deleted\n" +
+ "\t\talong with its contents, otherwise the directory is deleted\n" +
+ "\t\tonly if it is empty. The command will fail if the datanode is\n" +
+ "\t\tstill serving the block pool. Refer to refreshNamenodes to\n" +
+ "\t\tshut down a block pool service on a datanode.\n" +
+ "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address', " +
+ "whose default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
String setBalancerBandwidth = "-setBalancerBandwidth <bandwidth>:\n" +
"\tChanges the network bandwidth used by each datanode during\n" +
@@ -1893,23 +1903,24 @@ public class DFSAdmin extends FsShell {
+ " [-refreshCallQueue]");
} else if ("-reconfig".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
- + " [-reconfig <namenode|datanode> <host:port> "
+ + " [-reconfig <namenode|datanode> <host:ipc_port> "
+ "<start|status|properties>]");
} else if ("-refresh".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
- + " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
+ + " [-refresh <hostname:ipc_port> "
+ + "<resource_identifier> [arg1..argn]");
} else if ("-printTopology".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-printTopology]");
} else if ("-refreshNamenodes".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
- + " [-refreshNamenodes datanode-host:port]");
+ + " [-refreshNamenodes datanode-host:ipc_port]");
} else if ("-getVolumeReport".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
- + " [-getVolumeReport datanode-host:port]");
+ + " [-getVolumeReport datanode-host:ipc_port]");
} else if ("-deleteBlockPool".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
- + " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
+ + " [-deleteBlockPool datanode-host:ipc_port blockpoolId [force]]");
} else if ("-setBalancerBandwidth".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-setBalancerBandwidth <bandwidth in bytes per second>]");
[15/50] [abbrv] hadoop git commit: YARN-3260. AM attempt fail to
register before RM processes launch event. Contributed by Bibin A Chundatt
YARN-3260. AM attempt fail to register before RM processes launch event. Contributed by Bibin A Chundatt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5ae5ac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5ae5ac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5ae5ac5
Branch: refs/heads/HDFS-7240
Commit: a5ae5ac50e97cf829c41dcf01655cd9bd4d36a00
Parents: 75c0220
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Fri Jul 14 14:56:00 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Fri Jul 14 14:56:00 2017 -0500
----------------------------------------------------------------------
.../rmapp/attempt/RMAppAttemptImpl.java | 20 +++++++-----
.../attempt/TestRMAppAttemptTransitions.java | 33 ++++++++++----------
2 files changed, 28 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5ae5ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index d66a97d..4210c54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1205,6 +1205,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
@Override
public void transition(RMAppAttemptImpl appAttempt,
RMAppAttemptEvent event) {
+
+ appAttempt.registerClientToken();
appAttempt.launchAttempt();
}
}
@@ -1525,13 +1527,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
// Register with AMLivelinessMonitor
appAttempt.attemptLaunched();
- // register the ClientTokenMasterKey after it is saved in the store,
- // otherwise client may hold an invalid ClientToken after RM restarts.
- if (UserGroupInformation.isSecurityEnabled()) {
- appAttempt.rmContext.getClientToAMTokenSecretManager()
- .registerApplication(appAttempt.getAppAttemptId(),
- appAttempt.getClientTokenMasterKey());
- }
}
}
@@ -1598,11 +1593,20 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
appAttempt.amrmToken =
appAttempt.rmContext.getAMRMTokenSecretManager().createAndGetAMRMToken(
appAttempt.applicationAttemptId);
-
+ appAttempt.registerClientToken();
super.transition(appAttempt, event);
}
}
+ private void registerClientToken() {
+ // register the ClientTokenMasterKey after it is saved in the store,
+ // otherwise client may hold an invalid ClientToken after RM restarts.
+ if (UserGroupInformation.isSecurityEnabled()) {
+ rmContext.getClientToAMTokenSecretManager()
+ .registerApplication(getAppAttemptId(), getClientTokenMasterKey());
+ }
+ }
+
private static final class LaunchFailedTransition extends BaseFinalTransition {
public LaunchFailedTransition() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5ae5ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 9a4b6dc..7702ab1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -663,21 +663,29 @@ public class TestRMAppAttemptTransitions {
assertEquals(RMAppAttemptState.ALLOCATED_SAVING,
applicationAttempt.getAppAttemptState());
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ // Before SAVED state, can't create ClientToken as at this time
+ // ClientTokenMasterKey has not been registered in the SecretManager
+ assertNull(applicationAttempt.createClientToken("some client"));
+ }
+
applicationAttempt.handle(
new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),
RMAppAttemptEventType.ATTEMPT_NEW_SAVED));
-
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ // After SAVED state, the ClientTokenMasterKey has been registered in
+ // the SecretManager, so a ClientToken can now be created
+ assertNotNull(applicationAttempt.createClientToken("some client"));
+ }
+
testAppAttemptAllocatedState(container);
return container;
}
private void launchApplicationAttempt(Container container) {
- if (UserGroupInformation.isSecurityEnabled()) {
- // Before LAUNCHED state, can't create ClientToken as at this time
- // ClientTokenMasterKey has not been registered in the SecretManager
- assertNull(applicationAttempt.createClientToken("some client"));
- }
applicationAttempt.handle(
new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),
RMAppAttemptEventType.LAUNCHED));
@@ -1477,8 +1485,6 @@ public class TestRMAppAttemptTransitions {
Token<ClientToAMTokenIdentifier> token =
applicationAttempt.createClientToken(null);
Assert.assertNull(token);
- token = applicationAttempt.createClientToken("clientuser");
- Assert.assertNull(token);
launchApplicationAttempt(amContainer);
// after attempt is launched, can get ClientToken
@@ -1505,22 +1511,15 @@ public class TestRMAppAttemptTransitions {
public void testApplicationAttemptMasterKey() throws Exception {
Container amContainer = allocateApplicationAttempt();
ApplicationAttemptId appid = applicationAttempt.getAppAttemptId();
- boolean isMasterKeyExisted = false;
+ boolean isMasterKeyExisted = clientToAMTokenManager.hasMasterKey(appid);
- // before attempt is launched, can not get MasterKey
- isMasterKeyExisted = clientToAMTokenManager.hasMasterKey(appid);
- Assert.assertFalse(isMasterKeyExisted);
-
- launchApplicationAttempt(amContainer);
- // after attempt is launched and in secure mode, can get MasterKey
- isMasterKeyExisted = clientToAMTokenManager.hasMasterKey(appid);
if (isSecurityEnabled) {
Assert.assertTrue(isMasterKeyExisted);
Assert.assertNotNull(clientToAMTokenManager.getMasterKey(appid));
} else {
Assert.assertFalse(isMasterKeyExisted);
}
-
+ launchApplicationAttempt(amContainer);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt
.getAppAttemptId(), RMAppAttemptEventType.KILL));
assertEquals(YarnApplicationAttemptState.LAUNCHED,
[18/50] [abbrv] hadoop git commit: HDFS-12140. Remove BPOfferService
lock contention to get block pool id. Contributed by Daryn Sharp.
Posted by xy...@apache.org.
HDFS-12140. Remove BPOfferService lock contention to get block pool id. Contributed by Daryn Sharp.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7d187a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7d187a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7d187a1
Branch: refs/heads/HDFS-7240
Commit: e7d187a1b6a826edd5bd0f708184d48f3674d489
Parents: 8d86a93
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Jul 14 16:07:17 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Jul 14 16:07:17 2017 -0500
----------------------------------------------------------------------
.../hdfs/server/datanode/BPOfferService.java | 47 ++++++++++++++------
.../server/datanode/TestBPOfferService.java | 29 ++++++++++++
2 files changed, 63 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
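Editor's note: the pattern in this patch is a volatile-cache fast path layered over a read/write lock: once the block pool id is published, readers never touch the lock; only while registration is still in flight do they fall back to the locked check. A minimal sketch under hypothetical names (not the actual BPOfferService code):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CachedBlockPoolIdSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private volatile String cachedId; // lock-free fast path, null until registered
  private String nsBlockPoolId;     // guarded by the lock

  String getBlockPoolId() {
    String id = cachedId;
    if (id != null) {
      return id;            // common case after registration: no lock taken
    }
    lock.readLock().lock(); // slow path while registration is still in flight
    try {
      return nsBlockPoolId;
    } finally {
      lock.readLock().unlock();
    }
  }

  void setBlockPoolId(String id) {
    lock.writeLock().lock();
    try {
      nsBlockPoolId = id;
      cachedId = id;        // publish under the write lock to keep both in sync
    } finally {
      lock.writeLock().unlock();
    }
  }
}

Because the cache is written only under the write lock and the field is volatile, a reader either sees the published id or falls through to the locked check; clearing the namespace info also nulls the cache, so stale non-null reads are not possible.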
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7d187a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 0384f26..dbf7c8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -72,6 +72,7 @@ class BPOfferService {
volatile DatanodeRegistration bpRegistration;
private final String nameserviceId;
+ private volatile String bpId;
private final DataNode dn;
/**
@@ -184,6 +185,11 @@ class BPOfferService {
}
String getBlockPoolId(boolean quiet) {
+ // fast path: skip the lock once registration has completed and cached the id.
+ String id = bpId;
+ if (id != null) {
+ return id;
+ }
readLock();
try {
if (bpNSInfo != null) {
@@ -205,7 +211,7 @@ class BPOfferService {
}
boolean hasBlockPoolId() {
- return getNamespaceInfo() != null;
+ return getBlockPoolId(true) != null;
}
NamespaceInfo getNamespaceInfo() {
@@ -217,6 +223,28 @@ class BPOfferService {
}
}
+ @VisibleForTesting
+ NamespaceInfo setNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
+ writeLock();
+ try {
+ NamespaceInfo old = bpNSInfo;
+ if (bpNSInfo != null && nsInfo != null) {
+ checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
+ "Blockpool ID");
+ checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
+ "Namespace ID");
+ checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
+ "Cluster ID");
+ }
+ bpNSInfo = nsInfo;
+ // cache the block pool id for lock-free access.
+ bpId = (nsInfo != null) ? nsInfo.getBlockPoolID() : null;
+ return old;
+ } finally {
+ writeUnlock();
+ }
+ }
+
@Override
public String toString() {
readLock();
@@ -289,9 +317,10 @@ class BPOfferService {
private void checkBlock(ExtendedBlock block) {
Preconditions.checkArgument(block != null,
"block is null");
- Preconditions.checkArgument(block.getBlockPoolId().equals(getBlockPoolId()),
+ final String bpId = getBlockPoolId();
+ Preconditions.checkArgument(block.getBlockPoolId().equals(bpId),
"block belongs to BP %s instead of BP %s",
- block.getBlockPoolId(), getBlockPoolId());
+ block.getBlockPoolId(), bpId);
}
//This must be called only by blockPoolManager
@@ -337,8 +366,7 @@ class BPOfferService {
}
try {
- if (this.bpNSInfo == null) {
- this.bpNSInfo = nsInfo;
+ if (setNamespaceInfo(nsInfo) == null) {
boolean success = false;
// Now that we know the namespace ID, etc, we can pass this to the DN.
@@ -352,16 +380,9 @@ class BPOfferService {
// The datanode failed to initialize the BP. We need to reset
// the namespace info so that other BPService actors still have
// a chance to set it, and re-initialize the datanode.
- this.bpNSInfo = null;
+ setNamespaceInfo(null);
}
}
- } else {
- checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
- "Blockpool ID");
- checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
- "Namespace ID");
- checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
- "Cluster ID");
}
} finally {
writeUnlock();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7d187a1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index aa47eeb..ec19926 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -225,6 +225,35 @@ public class TestBPOfferService {
}
}
+ @Test
+ public void testLocklessBlockPoolId() throws Exception {
+ BPOfferService bpos = Mockito.spy(setupBPOSForNNs(mockNN1));
+
+ // bpNSInfo is not set, should take lock to check nsInfo.
+ assertNull(bpos.getBlockPoolId());
+ Mockito.verify(bpos).readLock();
+
+ // setting the bpNSInfo should cache the bp id, thus no locking.
+ Mockito.reset(bpos);
+ NamespaceInfo nsInfo = new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0);
+ assertNull(bpos.setNamespaceInfo(nsInfo));
+ assertEquals(FAKE_BPID, bpos.getBlockPoolId());
+ Mockito.verify(bpos, Mockito.never()).readLock();
+
+ // clearing the bpNSInfo should clear the cached bp id, thus requiring
+ // locking to check the bpNSInfo.
+ Mockito.reset(bpos);
+ assertEquals(nsInfo, bpos.setNamespaceInfo(null));
+ assertNull(bpos.getBlockPoolId());
+ Mockito.verify(bpos).readLock();
+
+ // test setting it again.
+ Mockito.reset(bpos);
+ assertNull(bpos.setNamespaceInfo(nsInfo));
+ assertEquals(FAKE_BPID, bpos.getBlockPoolId());
+ Mockito.verify(bpos, Mockito.never()).readLock();
+ }
+
/**
* Test that DNA_INVALIDATE commands from the standby are ignored.
*/
[30/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging
APIs over to slf4j in hadoop-common. Contributed by Wenxin He.
Posted by xy...@apache.org.
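Editor's note: the mechanical shape of this migration, shown on a hypothetical class rather than a file from the patch: LogFactory.getLog becomes LoggerFactory.getLogger, string concatenation becomes parameterized messages, and because slf4j has no FATAL level, fatal calls are downgraded to error (visible in Portmap below). Helpers that accepted a commons-logging Log, such as IOUtils.cleanup, are swapped for slf4j-aware variants like IOUtils.cleanupWithLogger.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MigrationSketch {
  // Before (commons-logging):
  //   private static final Log LOG = LogFactory.getLog(MigrationSketch.class);
  //   LOG.debug("read " + n + " bytes from " + src);
  private static final Logger LOG =
      LoggerFactory.getLogger(MigrationSketch.class);

  void read(int n, String src) {
    // Parameterized form: the message is only formatted when DEBUG is enabled.
    LOG.debug("read {} bytes from {}", n, src);
  }

  void fail(Throwable t) {
    // slf4j has no overload taking a bare Throwable, hence the
    // LOG.error(e.toString()) rewrites in this patch; passing the throwable
    // as the last argument preserves the stack trace instead.
    LOG.error("operation failed", t);
  }
}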
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
index cbc9943..cebebd2 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.oncrpc;
import java.nio.ByteBuffer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
@@ -29,6 +27,8 @@ import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.frame.FrameDecoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public final class RpcUtil {
/**
@@ -63,7 +63,8 @@ public final class RpcUtil {
* each RPC client.
*/
static class RpcFrameDecoder extends FrameDecoder {
- public static final Log LOG = LogFactory.getLog(RpcFrameDecoder.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(RpcFrameDecoder.class);
private ChannelBuffer currentFrame;
@Override
@@ -107,8 +108,8 @@ public final class RpcUtil {
* request into a RpcInfo instance.
*/
static final class RpcMessageParserStage extends SimpleChannelUpstreamHandler {
- private static final Log LOG = LogFactory
- .getLog(RpcMessageParserStage.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(RpcMessageParserStage.class);
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
index b72153a..23b6682 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
@@ -17,20 +17,21 @@
*/
package org.apache.hadoop.oncrpc;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A simple TCP based RPC client handler used by {@link SimpleTcpServer}.
*/
public class SimpleTcpClientHandler extends SimpleChannelHandler {
- public static final Log LOG = LogFactory.getLog(SimpleTcpClient.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SimpleTcpClient.class);
protected final XDR request;
public SimpleTcpClientHandler(XDR request) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index bd48b15..177fa3d 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.oncrpc;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFactory;
@@ -30,12 +28,15 @@ import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Simple TCP server implemented using netty.
*/
public class SimpleTcpServer {
- public static final Log LOG = LogFactory.getLog(SimpleTcpServer.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SimpleTcpServer.class);
protected final int port;
protected int boundPort = -1; // Will be set after server starts
protected final SimpleChannelUpstreamHandler rpcProgram;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
index d691aba..e65003c 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
@@ -20,20 +20,21 @@ package org.apache.hadoop.oncrpc;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.DatagramChannelFactory;
import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Simple UDP server implemented using netty.
*/
public class SimpleUdpServer {
- public static final Log LOG = LogFactory.getLog(SimpleUdpServer.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SimpleUdpServer.class);
private final int SEND_BUFFER_SIZE = 65536;
private final int RECEIVE_BUFFER_SIZE = 65536;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java
index fe4350b..64edf48 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java
@@ -18,16 +18,16 @@
package org.apache.hadoop.oncrpc.security;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.XDR;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Base class for all credentials. Currently we only support 3 different types
* of auth flavors: AUTH_NONE, AUTH_SYS, and RPCSEC_GSS.
*/
public abstract class Credentials extends RpcAuthInfo {
- public static final Log LOG = LogFactory.getLog(Credentials.class);
+ public static final Logger LOG = LoggerFactory.getLogger(Credentials.class);
public static Credentials readFlavorAndCredentials(XDR xdr) {
AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java
index 93efba8..4a674e8 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java
@@ -19,13 +19,14 @@ package org.apache.hadoop.oncrpc.security;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.XDR;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class SecurityHandler {
- public static final Log LOG = LogFactory.getLog(SecurityHandler.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SecurityHandler.class);
public abstract String getUser();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
index 7586fda..123999d 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
@@ -22,8 +22,6 @@ import java.net.SocketAddress;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.util.StringUtils;
@@ -41,12 +39,14 @@ import org.jboss.netty.handler.timeout.IdleStateHandler;
import org.jboss.netty.util.HashedWheelTimer;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Portmap service for binding RPC protocols. See RFC 1833 for details.
*/
final class Portmap {
- private static final Log LOG = LogFactory.getLog(Portmap.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Portmap.class);
private static final int DEFAULT_IDLE_TIME_MILLISECONDS = 5000;
private ConnectionlessBootstrap udpServer;
@@ -65,7 +65,7 @@ final class Portmap {
pm.start(DEFAULT_IDLE_TIME_MILLISECONDS,
new InetSocketAddress(port), new InetSocketAddress(port));
} catch (Throwable e) {
- LOG.fatal("Failed to start the server. Cause:", e);
+ LOG.error("Failed to start the server. Cause:", e);
pm.shutdown();
System.exit(-1);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java
index 67175d0..0bc380f 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.portmap;
import java.util.concurrent.ConcurrentHashMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.RpcInfo;
@@ -39,6 +37,8 @@ import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.handler.timeout.IdleState;
import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler;
import org.jboss.netty.handler.timeout.IdleStateEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
static final int PROGRAM = 100000;
@@ -51,7 +51,8 @@ final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
static final int PMAPPROC_DUMP = 4;
static final int PMAPPROC_GETVERSADDR = 9;
- private static final Log LOG = LogFactory.getLog(RpcProgramPortmap.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RpcProgramPortmap.class);
private final ConcurrentHashMap<String, PortmapMapping> map = new ConcurrentHashMap<String, PortmapMapping>();
[33/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging
APIs over to slf4j in hadoop-common. Contributed by Wenxin He.
Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
index 1074e87..994eb13 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
@@ -24,12 +24,11 @@ import java.util.Set;
import static com.google.common.base.Preconditions.*;
import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Helper class to manage a group of mutable rate metrics
@@ -43,7 +42,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MutableRates extends MutableMetric {
- static final Log LOG = LogFactory.getLog(MutableRates.class);
+ static final Logger LOG = LoggerFactory.getLogger(MutableRates.class);
private final MetricsRegistry registry;
private final Set<Class<?>> protocolCache = Sets.newHashSet();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
index 9827ca7..26a1506 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
@@ -27,12 +27,12 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ConcurrentMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.SampleStat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
@@ -48,7 +48,8 @@ import org.apache.hadoop.metrics2.util.SampleStat;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MutableRatesWithAggregation extends MutableMetric {
- static final Log LOG = LogFactory.getLog(MutableRatesWithAggregation.class);
+ static final Logger LOG =
+ LoggerFactory.getLogger(MutableRatesWithAggregation.class);
private final Map<String, MutableRate> globalMetrics =
new ConcurrentHashMap<>();
private final Set<Class<?>> protocolCache = Sets.newHashSet();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
index 5c58d52..de4c14d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
@@ -19,8 +19,6 @@
package org.apache.hadoop.metrics2.sink;
import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.AbstractMetric;
@@ -28,6 +26,8 @@ import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
@@ -42,7 +42,8 @@ import java.nio.charset.StandardCharsets;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class GraphiteSink implements MetricsSink, Closeable {
- private static final Log LOG = LogFactory.getLog(GraphiteSink.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(GraphiteSink.class);
private static final String SERVER_HOST_KEY = "server_host";
private static final String SERVER_PORT_KEY = "server_port";
private static final String METRICS_PREFIX = "metrics_prefix";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
index 4125461..804e903 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
@@ -26,11 +26,11 @@ import java.util.List;
import java.util.Map;
import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.util.Servers;
import org.apache.hadoop.net.DNS;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This is the base class for Ganglia sink classes using metrics2. A lot of the code
@@ -41,7 +41,7 @@ import org.apache.hadoop.net.DNS;
*/
public abstract class AbstractGangliaSink implements MetricsSink {
- public final Log LOG = LogFactory.getLog(this.getClass());
+ public final Logger LOG = LoggerFactory.getLogger(this.getClass());
/*
* Output of "gmetric --help" showing allowable values
@@ -127,7 +127,7 @@ public abstract class AbstractGangliaSink implements MetricsSink {
conf.getString("dfs.datanode.dns.interface", "default"),
conf.getString("dfs.datanode.dns.nameserver", "default"));
} catch (UnknownHostException uhe) {
- LOG.error(uhe);
+ LOG.error(uhe.toString());
hostName = "UNKNOWN.example.com";
}
}
@@ -155,7 +155,7 @@ public abstract class AbstractGangliaSink implements MetricsSink {
datagramSocket = new DatagramSocket();
}
} catch (IOException e) {
- LOG.error(e);
+ LOG.error(e.toString());
}
// see if sparseMetrics is supported. Default is false
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
index ffccfb6..3e8314e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink30.java
@@ -28,8 +28,6 @@ import java.util.Set;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsException;
@@ -38,6 +36,8 @@ import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.MsInfo;
import org.apache.hadoop.metrics2.util.MetricsCache;
import org.apache.hadoop.metrics2.util.MetricsCache.Record;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This code supports Ganglia 3.0
@@ -45,7 +45,7 @@ import org.apache.hadoop.metrics2.util.MetricsCache.Record;
*/
public class GangliaSink30 extends AbstractGangliaSink {
- public final Log LOG = LogFactory.getLog(this.getClass());
+ public final Logger LOG = LoggerFactory.getLogger(this.getClass());
private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java
index 1d92177..5aebff8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/GangliaSink31.java
@@ -18,10 +18,11 @@
package org.apache.hadoop.metrics2.sink.ganglia;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
/**
* This code supports Ganglia 3.1
@@ -29,7 +30,7 @@ import org.apache.commons.logging.LogFactory;
*/
public class GangliaSink31 extends GangliaSink30 {
- public final Log LOG = LogFactory.getLog(this.getClass());
+ public final Logger LOG = LoggerFactory.getLogger(this.getClass());
/**
* The method sends metrics to Ganglia servers. The method has been taken from
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
index 7ec07aa..ded49d6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
@@ -25,11 +25,11 @@ import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This util class provides a method to register an MBean using
@@ -39,7 +39,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MBeans {
- private static final Log LOG = LogFactory.getLog(MBeans.class);
+ private static final Logger LOG = LoggerFactory.getLogger(MBeans.class);
private static final String DOMAIN_PREFIX = "Hadoop:";
private static final String SERVICE_PREFIX = "service=";
private static final String NAME_PREFIX = "name=";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
index cfd126c..6cfbc39 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
@@ -19,13 +19,13 @@
package org.apache.hadoop.metrics2.util;
import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.LinkedHashMap;
@@ -39,7 +39,7 @@ import java.util.StringJoiner;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MetricsCache {
- static final Log LOG = LogFactory.getLog(MetricsCache.class);
+ static final Logger LOG = LoggerFactory.getLogger(MetricsCache.class);
static final int MAX_RECS_PER_NAME_DEFAULT = 1000;
private final Map<String, RecordCache> map = Maps.newHashMap();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
index a6dc8e3..81041c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
@@ -20,10 +20,10 @@ package org.apache.hadoop.net;
import com.google.common.net.InetAddresses;
import com.sun.istack.Nullable;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.NetworkInterface;
@@ -52,7 +52,7 @@ import javax.naming.directory.InitialDirContext;
@InterfaceStability.Unstable
public class DNS {
- private static final Log LOG = LogFactory.getLog(DNS.class);
+ private static final Logger LOG = LoggerFactory.getLogger(DNS.class);
/**
* The cached hostname, initially null.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 4050107..8577336 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -44,8 +44,6 @@ import java.util.concurrent.ConcurrentHashMap;
import javax.net.SocketFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -58,11 +56,13 @@ import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NetUtils {
- private static final Log LOG = LogFactory.getLog(NetUtils.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NetUtils.class);
private static Map<String, String> hostToResolved =
new HashMap<String, String>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
index 3dcb610..02b44a5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.net;
import java.util.*;
import java.io.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class implements the {@link DNSToSwitchMapping} interface using a
@@ -145,8 +145,8 @@ public class ScriptBasedMapping extends CachedDNSToSwitchMapping {
extends AbstractDNSToSwitchMapping {
private String scriptName;
private int maxArgs; //max hostnames per call of the script
- private static final Log LOG =
- LogFactory.getLog(ScriptBasedMapping.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ScriptBasedMapping.class);
/**
* Set the configuration and extract the configuration parameters of interest
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
index b50f7e9..f489581 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
@@ -31,9 +31,9 @@ import java.nio.channels.spi.SelectorProvider;
import java.util.Iterator;
import java.util.LinkedList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This supports input and output streams for socket channels.
@@ -42,7 +42,7 @@ import org.apache.hadoop.util.Time;
abstract class SocketIOWithTimeout {
// This is intentionally package private.
- static final Log LOG = LogFactory.getLog(SocketIOWithTimeout.class);
+ static final Logger LOG = LoggerFactory.getLogger(SocketIOWithTimeout.class);
private SelectableChannel channel;
private long timeout;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
index 362cf07..ead9a74 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
@@ -29,12 +29,12 @@ import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <p>
@@ -56,7 +56,7 @@ import org.apache.hadoop.conf.Configured;
@InterfaceStability.Evolving
public class TableMapping extends CachedDNSToSwitchMapping {
- private static final Log LOG = LogFactory.getLog(TableMapping.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TableMapping.class);
public TableMapping() {
super(new RawTableMapping());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
index 8379fd1..ac118c0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
@@ -29,12 +29,12 @@ import java.nio.channels.ReadableByteChannel;
import java.nio.ByteBuffer;
import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.CloseableReferenceCount;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* The implementation of UNIX domain sockets in Java.
@@ -60,7 +60,7 @@ public class DomainSocket implements Closeable {
}
}
- static Log LOG = LogFactory.getLog(DomainSocket.class);
+ static final Logger LOG = LoggerFactory.getLogger(DomainSocket.class);
/**
* True only if we should validate the paths used in
@@ -459,13 +459,13 @@ public class DomainSocket implements Closeable {
try {
closeFileDescriptor0(descriptors[i]);
} catch (Throwable t) {
- LOG.warn(t);
+ LOG.warn(t.toString());
}
} else if (streams[i] != null) {
try {
streams[i].close();
} catch (Throwable t) {
- LOG.warn(t);
+ LOG.warn(t.toString());
} finally {
streams[i] = null; }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index e1bcf7e..c7af97f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -33,13 +33,13 @@ import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.Uninterruptibles;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* The DomainSocketWatcher watches a set of domain sockets to see when they
@@ -68,7 +68,7 @@ public final class DomainSocketWatcher implements Closeable {
}
}
- static Log LOG = LogFactory.getLog(DomainSocketWatcher.class);
+ static final Logger LOG = LoggerFactory.getLogger(DomainSocketWatcher.class);
/**
* The reason why DomainSocketWatcher is not available, or null if it is
@@ -306,7 +306,7 @@ public final class DomainSocketWatcher implements Closeable {
try {
if (closed) {
handler.handle(sock);
- IOUtils.cleanup(LOG, sock);
+ IOUtils.cleanupWithLogger(LOG, sock);
return;
}
Entry entry = new Entry(sock, handler);
@@ -411,7 +411,7 @@ public final class DomainSocketWatcher implements Closeable {
this + ": file descriptor " + sock.fd + " was closed while " +
"still in the poll(2) loop.");
}
- IOUtils.cleanup(LOG, sock);
+ IOUtils.cleanupWithLogger(LOG, sock);
fdSet.remove(fd);
return true;
} else {
@@ -524,7 +524,7 @@ public final class DomainSocketWatcher implements Closeable {
Entry entry = iter.next();
entry.getDomainSocket().refCount.unreference();
entry.getHandler().handle(entry.getDomainSocket());
- IOUtils.cleanup(LOG, entry.getDomainSocket());
+ IOUtils.cleanupWithLogger(LOG, entry.getDomainSocket());
iter.remove();
}
// Items in toRemove might not be really removed, handle it here
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
index 751cf02..c97f8ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
@@ -17,13 +17,13 @@
*/
package org.apache.hadoop.security;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
@@ -42,8 +42,8 @@ import java.util.List;
*/
public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
- public static final Log LOG =
- LogFactory.getLog(AuthenticationWithProxyUserFilter.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(AuthenticationWithProxyUserFilter.class);
/**
* Constant used in URL's query string to perform a proxy user request, the
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
index ffa7e2b..b8cfdf7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
@@ -25,13 +25,13 @@ import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An implementation of {@link GroupMappingServiceProvider} which
@@ -48,7 +48,8 @@ public class CompositeGroupsMapping
public static final String MAPPING_PROVIDERS_COMBINED_CONFIG_KEY = MAPPING_PROVIDERS_CONFIG_KEY + ".combined";
public static final String MAPPING_PROVIDER_CONFIG_PREFIX = GROUP_MAPPING_CONFIG_PREFIX + ".provider";
- private static final Log LOG = LogFactory.getLog(CompositeGroupsMapping.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CompositeGroupsMapping.class);
private List<GroupMappingServiceProvider> providersList =
new ArrayList<GroupMappingServiceProvider>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
index 1283d8f..4d58981 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
@@ -36,8 +36,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -52,6 +50,8 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.proto.SecurityProtos.CredentialsKVProto;
import org.apache.hadoop.security.proto.SecurityProtos.CredentialsProto;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A class that provides the facilities of reading and writing
@@ -60,7 +60,7 @@ import org.apache.hadoop.security.proto.SecurityProtos.CredentialsProto;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Credentials implements Writable {
- private static final Log LOG = LogFactory.getLog(Credentials.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Credentials.class);
private Map<Text, byte[]> secretKeysMap = new HashMap<Text, byte[]>();
private Map<Text, Token<? extends TokenIdentifier>> tokenMap =
@@ -188,7 +188,7 @@ public class Credentials implements Writable {
} catch(IOException ioe) {
throw new IOException("Exception reading " + filename, ioe);
} finally {
- IOUtils.cleanup(LOG, in);
+ IOUtils.cleanupWithLogger(LOG, in);
}
}
@@ -211,7 +211,7 @@ public class Credentials implements Writable {
} catch(IOException ioe) {
throw new IOException("Exception reading " + filename, ioe);
} finally {
- IOUtils.cleanup(LOG, in);
+ IOUtils.cleanupWithLogger(LOG, in);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 596259a..ad09865 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -59,9 +59,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Timer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A user-to-groups mapping service.
@@ -74,7 +73,7 @@ import org.apache.commons.logging.LogFactory;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Groups {
- private static final Log LOG = LogFactory.getLog(Groups.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Groups.class);
private final GroupMappingServiceProvider impl;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HttpCrossOriginFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HttpCrossOriginFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HttpCrossOriginFilterInitializer.java
index f9c1816..47b5a58 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HttpCrossOriginFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HttpCrossOriginFilterInitializer.java
@@ -21,20 +21,20 @@ package org.apache.hadoop.security;
import java.util.HashMap;
import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.security.http.CrossOriginFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class HttpCrossOriginFilterInitializer extends FilterInitializer {
public static final String PREFIX = "hadoop.http.cross-origin.";
public static final String ENABLED_SUFFIX = "enabled";
- private static final Log LOG =
- LogFactory.getLog(HttpCrossOriginFilterInitializer.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HttpCrossOriginFilterInitializer.class);
@Override
public void initFilter(FilterContainer container, Configuration conf) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
index d397e44..a0f6142 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
@@ -25,9 +25,9 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A JNI-based implementation of {@link GroupMappingServiceProvider}
@@ -38,8 +38,8 @@ import org.apache.hadoop.util.NativeCodeLoader;
@InterfaceStability.Evolving
public class JniBasedUnixGroupsMapping implements GroupMappingServiceProvider {
- private static final Log LOG =
- LogFactory.getLog(JniBasedUnixGroupsMapping.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(JniBasedUnixGroupsMapping.class);
static {
if (!NativeCodeLoader.isNativeCodeLoaded()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
index 40333fc..f164430 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
@@ -21,16 +21,16 @@ package org.apache.hadoop.security;
import java.io.IOException;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class JniBasedUnixGroupsMappingWithFallback implements
GroupMappingServiceProvider {
- private static final Log LOG = LogFactory
- .getLog(JniBasedUnixGroupsMappingWithFallback.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(JniBasedUnixGroupsMappingWithFallback.class);
private GroupMappingServiceProvider impl;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
index ff4ab98..9ba55e4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
@@ -26,11 +26,11 @@ import java.util.LinkedList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.security.NetgroupCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A JNI-based implementation of {@link GroupMappingServiceProvider}
@@ -42,7 +42,7 @@ import org.apache.hadoop.security.NetgroupCache;
public class JniBasedUnixGroupsNetgroupMapping
extends JniBasedUnixGroupsMapping {
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
JniBasedUnixGroupsNetgroupMapping.class);
native String[] getUsersForNetgroupJNI(String group);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
index 7d77c10..fcc47cb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java
@@ -21,15 +21,15 @@ package org.apache.hadoop.security;
import java.io.IOException;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class JniBasedUnixGroupsNetgroupMappingWithFallback implements
GroupMappingServiceProvider {
- private static final Log LOG = LogFactory
- .getLog(JniBasedUnixGroupsNetgroupMappingWithFallback.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(JniBasedUnixGroupsNetgroupMappingWithFallback.class);
private GroupMappingServiceProvider impl;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index 1a184e8..babfa38 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -41,12 +41,12 @@ import javax.naming.directory.SearchResult;
import javax.naming.ldap.LdapName;
import javax.naming.ldap.Rdn;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An implementation of {@link GroupMappingServiceProvider} which
@@ -211,7 +211,8 @@ public class LdapGroupsMapping
LDAP_CONFIG_PREFIX + ".read.timeout.ms";
public static final int READ_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
- private static final Log LOG = LogFactory.getLog(LdapGroupsMapping.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(LdapGroupsMapping.class);
static final SearchControls SEARCH_CONTROLS = new SearchControls();
static {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
index 013e56c..8dcf8b9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
@@ -26,14 +26,14 @@ import java.net.URL;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Utility methods for both key and credential provider APIs.
@@ -57,7 +57,8 @@ public final class ProviderUtils {
"Please review the documentation regarding provider passwords in\n" +
"the keystore passwords section of the Credential Provider API\n";
- private static final Log LOG = LogFactory.getLog(ProviderUtils.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ProviderUtils.class);
/**
* Hidden ctor to ensure that this utility class isn't
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java
index a3d66b9..a91a90a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java
@@ -30,10 +30,10 @@ import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A SaslInputStream is composed of an InputStream and a SaslServer (or
@@ -45,7 +45,8 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SaslInputStream extends InputStream implements ReadableByteChannel {
- public static final Log LOG = LogFactory.getLog(SaslInputStream.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SaslInputStream.class);
private final DataInputStream inStream;
/** Should we wrap the communication channel? */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
index 388f1b2..11714b1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
@@ -45,8 +45,6 @@ import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslClient;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -75,6 +73,9 @@ import org.apache.hadoop.util.ProtoUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
import com.google.re2j.Pattern;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
/**
* A utility class that encapsulates SASL logic for RPC client
*/
@@ -82,7 +83,7 @@ import com.google.re2j.Pattern;
@InterfaceStability.Evolving
public class SaslRpcClient {
// This log is public as it is referenced in tests
- public static final Log LOG = LogFactory.getLog(SaslRpcClient.class);
+ public static final Logger LOG = LoggerFactory.getLogger(SaslRpcClient.class);
private final UserGroupInformation ugi;
private final Class<?> protocol;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
index a6fbb6d..643af79 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
@@ -45,8 +45,6 @@ import javax.security.sasl.SaslServer;
import javax.security.sasl.SaslServerFactory;
import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -57,6 +55,8 @@ import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A utility class for dealing with SASL on RPC server
@@ -64,7 +64,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SaslRpcServer {
- public static final Log LOG = LogFactory.getLog(SaslRpcServer.class);
+ public static final Logger LOG = LoggerFactory.getLogger(SaslRpcServer.class);
public static final String SASL_DEFAULT_REALM = "default";
private static SaslServerFactory saslFactory;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index b7d1ec0..20e8754 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -36,8 +36,6 @@ import javax.annotation.Nullable;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.kerberos.KerberosTicket;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -51,7 +49,8 @@ import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.StringUtils;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
//this will need to be replaced someday when there is a suitable replacement
import sun.net.dns.ResolverConfiguration;
import sun.net.util.IPAddressUtil;
@@ -64,7 +63,7 @@ import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class SecurityUtil {
- public static final Log LOG = LogFactory.getLog(SecurityUtil.class);
+ public static final Logger LOG = LoggerFactory.getLogger(SecurityUtil.class);
public static final String HOSTNAME_PATTERN = "_HOST";
public static final String FAILED_TO_GET_UGI_MSG_HEADER =
"Failed to obtain user group information:";
@@ -473,7 +472,7 @@ public final class SecurityUtil {
try {
ugi = UserGroupInformation.getLoginUser();
} catch (IOException e) {
- LOG.fatal("Exception while getting login user", e);
+ LOG.error("Exception while getting login user", e);
e.printStackTrace();
Runtime.getRuntime().exit(-1);
}
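
One substitution in SecurityUtil is not mechanical: LOG.fatal becomes LOG.error, because the SLF4J Logger interface deliberately has no FATAL level (the log4j-over-slf4j bridge likewise maps FATAL to ERROR). Callers that want to preserve the distinction typically attach an SLF4J Marker; this commit simply downgrades the level name while keeping the throwable:

    // before (commons-logging): LOG.fatal("Exception while getting login user", e);
    // after (SLF4J): same message and stack trace, highest available level
    LOG.error("Exception while getting login user", e);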
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
index efc1fd6..2ed9677 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
@@ -29,14 +29,14 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A simple shell-based implementation of {@link IdMappingServiceProvider}
@@ -62,8 +62,8 @@ import com.google.common.collect.HashBiMap;
*/
public class ShellBasedIdMapping implements IdMappingServiceProvider {
- private static final Log LOG =
- LogFactory.getLog(ShellBasedIdMapping.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ShellBasedIdMapping.class);
private final static String OS = System.getProperty("os.name");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java
index 4aa4e9f..eff6985 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java
@@ -23,12 +23,12 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.security.NetgroupCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A simple shell-based implementation of {@link GroupMappingServiceProvider}
@@ -40,8 +40,8 @@ import org.apache.hadoop.security.NetgroupCache;
public class ShellBasedUnixGroupsNetgroupMapping
extends ShellBasedUnixGroupsMapping {
- private static final Log LOG =
- LogFactory.getLog(ShellBasedUnixGroupsNetgroupMapping.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ShellBasedUnixGroupsNetgroupMapping.class);
/**
* Get unix groups (parent) and netgroups for given user
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java
index 8d4df64..a64c4de 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java
@@ -24,13 +24,13 @@ import java.util.TreeMap;
import javax.security.sasl.Sasl;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.util.CombinedIPWhiteList;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
@@ -54,7 +54,8 @@ import org.apache.hadoop.util.StringUtils;
*
*/
public class WhitelistBasedResolver extends SaslPropertiesResolver {
- public static final Log LOG = LogFactory.getLog(WhitelistBasedResolver.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(WhitelistBasedResolver.class);
private static final String FIXEDWHITELIST_DEFAULT_LOCATION = "/etc/hadoop/fixedwhitelist";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
index 8e4a0a5..df783f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.security.alias;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -27,6 +25,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.ProviderUtils;
import com.google.common.base.Charsets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOException;
@@ -60,7 +60,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
*/
@InterfaceAudience.Private
public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
- public static final Log LOG = LogFactory.getLog(
+ public static final Logger LOG = LoggerFactory.getLogger(
AbstractJavaKeyStoreProvider.class);
public static final String CREDENTIAL_PASSWORD_ENV_VAR =
"HADOOP_CREDSTORE_PASSWORD";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
index 9da95dc..4c47348 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
@@ -23,8 +23,6 @@ import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
@@ -36,6 +34,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.MachineList;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An authorization manager which handles service-level authorization
@@ -69,8 +69,9 @@ public class ServiceAuthorizationManager {
public static final String SERVICE_AUTHORIZATION_CONFIG =
"hadoop.security.authorization";
- public static final Log AUDITLOG =
- LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName());
+ public static final Logger AUDITLOG =
+ LoggerFactory.getLogger(
+ "SecurityLogger." + ServiceAuthorizationManager.class.getName());
private static final String AUTHZ_SUCCESSFUL_FOR = "Authorization successful for ";
private static final String AUTHZ_FAILED_FOR = "Authorization failed for ";
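
Note the AUDITLOG conversion uses LoggerFactory.getLogger(String), the by-name overload, so the audit category keeps its "SecurityLogger." prefix and any log4j configuration rules keyed on that name keep matching after the migration. In isolation (the commented info() call is an illustrative usage, not a line from the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // resolved by explicit name rather than by class, so appender and level
    // configuration written against "SecurityLogger.*" still applies
    public static final Logger AUDITLOG = LoggerFactory.getLogger(
        "SecurityLogger." + ServiceAuthorizationManager.class.getName());

    // e.g. AUDITLOG.info(AUTHZ_SUCCESSFUL_FOR + user + " for protocol=" + protocol);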
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
index ea78762..58d50cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
@@ -35,14 +35,15 @@ import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class CrossOriginFilter implements Filter {
- private static final Log LOG = LogFactory.getLog(CrossOriginFilter.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CrossOriginFilter.class);
// HTTP CORS Request Headers
static final String ORIGIN = "Origin";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
index af8e32c..b0df8f0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.security.ssl;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;
@@ -47,8 +47,8 @@ import java.text.MessageFormat;
@InterfaceStability.Evolving
public class FileBasedKeyStoresFactory implements KeyStoresFactory {
- private static final Log LOG =
- LogFactory.getLog(FileBasedKeyStoresFactory.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(FileBasedKeyStoresFactory.class);
public static final String SSL_KEYSTORE_LOCATION_TPL_KEY =
"ssl.{0}.keystore.location";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
index 2d3afea..88b045e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.security.ssl;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
@@ -47,7 +47,8 @@ public final class ReloadingX509TrustManager
implements X509TrustManager, Runnable {
@VisibleForTesting
- static final Log LOG = LogFactory.getLog(ReloadingX509TrustManager.class);
+ static final Logger LOG =
+ LoggerFactory.getLogger(ReloadingX509TrustManager.class);
@VisibleForTesting
static final String RELOAD_ERROR_MESSAGE =
"Could not load truststore (keep using existing one) : ";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
index 1396054..d128cc9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
@@ -27,20 +27,21 @@ import java.util.Date;
import java.util.ServiceLoader;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* DtFileOperations is a collection of delegation token file operations.
*/
public final class DtFileOperations {
- private static final Log LOG = LogFactory.getLog(DtFileOperations.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DtFileOperations.class);
/** No public constructor as per checkstyle. */
private DtFileOperations() { }
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
index f00e6fd..88db34f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java
@@ -22,19 +22,19 @@ import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.CommandShell;
import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* DtUtilShell is a set of command line token file management operations.
*/
public class DtUtilShell extends CommandShell {
- private static final Log LOG = LogFactory.getLog(DtUtilShell.class);
+ private static final Logger LOG = LoggerFactory.getLogger(DtUtilShell.class);
private static final String FORMAT_SUBSTRING = "[-format (" +
DtFileOperations.FORMAT_JAVA + "|" +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index 99cc8c7..33cb9ec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -23,14 +23,14 @@ import com.google.protobuf.ByteString;
import com.google.common.primitives.Bytes;
import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.Arrays;
@@ -44,7 +44,7 @@ import java.util.UUID;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Token<T extends TokenIdentifier> implements Writable {
- public static final Log LOG = LogFactory.getLog(Token.class);
+ public static final Logger LOG = LoggerFactory.getLogger(Token.class);
private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
index cf88745..f06681b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
@@ -30,8 +30,6 @@ import java.util.Set;
import javax.crypto.SecretKey;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
@@ -43,6 +41,8 @@ import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.Public
@InterfaceStability.Evolving
@@ -50,8 +50,8 @@ public abstract
class AbstractDelegationTokenSecretManager<TokenIdent
extends AbstractDelegationTokenIdentifier>
extends SecretManager<TokenIdent> {
- private static final Log LOG = LogFactory
- .getLog(AbstractDelegationTokenSecretManager.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(AbstractDelegationTokenSecretManager.class);
private String formatTokenId(TokenIdent id) {
return "(" + id + ")";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
index 1327683..2a1140f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
@@ -25,13 +25,13 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This is the base implementation class for services.
@@ -40,7 +40,8 @@ import com.google.common.annotations.VisibleForTesting;
@Evolving
public abstract class AbstractService implements Service {
- private static final Log LOG = LogFactory.getLog(AbstractService.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractService.class);
/**
* Service name.
@@ -258,7 +259,7 @@ public abstract class AbstractService implements Service {
*/
protected final void noteFailure(Exception exception) {
if (LOG.isDebugEnabled()) {
- LOG.debug("noteFailure " + exception, null);
+ LOG.debug("noteFailure " + exception, (Throwable) null);
}
if (exception == null) {
//make sure failure logic doesn't itself cause problems
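
The noteFailure hunk adds a (Throwable) cast to a null argument. With commons-logging, debug(Object, Throwable) was the only two-argument form; SLF4J overloads debug(String, Object), debug(String, Object...), and debug(String, Throwable), and the cast pins the call to the Throwable overload explicitly rather than leaving the choice to the compiler's most-specific-method resolution. A reading of the changed line:

    // commons-logging: debug(Object message, Throwable t) -- null fit without ceremony
    // SLF4J: several overloads accept ("msg", null); the cast documents (and
    // guarantees) that the Throwable form is the one being called
    LOG.debug("noteFailure " + exception, (Throwable) null);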
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
index 51cb4a3..a5e8c89 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
@@ -21,11 +21,11 @@ package org.apache.hadoop.service;
import java.util.ArrayList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Composition of services.
@@ -34,7 +34,8 @@ import org.apache.hadoop.conf.Configuration;
@Evolving
public class CompositeService extends AbstractService {
- private static final Log LOG = LogFactory.getLog(CompositeService.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CompositeService.class);
/**
* Policy on shutdown: attempt to close everything (purest) or
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/LoggingStateChangeListener.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/LoggingStateChangeListener.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/LoggingStateChangeListener.java
index 700999d..c978fec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/LoggingStateChangeListener.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/LoggingStateChangeListener.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.service;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This is a state change listener that logs events at INFO level
@@ -30,15 +30,16 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Evolving
public class LoggingStateChangeListener implements ServiceStateChangeListener {
- private static final Log LOG = LogFactory.getLog(LoggingStateChangeListener.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(LoggingStateChangeListener.class);
- private final Log log;
+ private final Logger log;
/**
* Log events to the given log
* @param log destination for events
*/
- public LoggingStateChangeListener(Log log) {
+ public LoggingStateChangeListener(Logger log) {
//force an NPE if a null log came in
log.isDebugEnabled();
this.log = log;
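
Unlike the private LOG fields elsewhere in this commit, the LoggingStateChangeListener constructor is public API, so this hunk changes the type callers must supply. A usage sketch after the change (the service variable and logger name are assumptions for illustration):

    import org.apache.hadoop.service.LoggingStateChangeListener;
    import org.apache.hadoop.service.Service;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    Logger stateLog = LoggerFactory.getLogger("my.service.statechanges");
    // callers now pass an org.slf4j.Logger where a commons-logging Log used to go
    service.registerServiceListener(new LoggingStateChangeListener(stateLog));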
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
index a0a77ce..e7683a2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
@@ -22,10 +22,10 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class contains a set of methods to work with services, especially
@@ -34,7 +34,8 @@ import org.slf4j.Logger;
@Public
@Evolving
public final class ServiceOperations {
- private static final Log LOG = LogFactory.getLog(AbstractService.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractService.class);
private ServiceOperations() {
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TracerConfigurationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TracerConfigurationManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TracerConfigurationManager.java
index 75601ad..658e4d3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TracerConfigurationManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TracerConfigurationManager.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.tracing;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.htrace.core.SpanReceiver;
import org.apache.htrace.core.TracerPool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class provides functions for managing the tracer configuration at
@@ -33,8 +33,8 @@ import org.apache.htrace.core.TracerPool;
*/
@InterfaceAudience.Private
public class TracerConfigurationManager implements TraceAdminProtocol {
- private static final Log LOG =
- LogFactory.getLog(TracerConfigurationManager.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TracerConfigurationManager.class);
private final String confPrefix;
private final Configuration conf;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
index 2f46e1f..972bbff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -29,12 +29,12 @@ import java.util.Arrays;
import java.util.List;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link URLClassLoader} for application isolation. Classes from the
@@ -56,8 +56,8 @@ public class ApplicationClassLoader extends URLClassLoader {
private static final String SYSTEM_CLASSES_DEFAULT_KEY =
"system.classes.default";
- private static final Log LOG =
- LogFactory.getLog(ApplicationClassLoader.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ApplicationClassLoader.class.getName());
static {
try (InputStream is = ApplicationClassLoader.class.getClassLoader()
@@ -179,7 +179,7 @@ public class ApplicationClassLoader extends URLClassLoader {
}
} catch (ClassNotFoundException e) {
if (LOG.isDebugEnabled()) {
- LOG.debug(e);
+ LOG.debug(e.toString());
}
ex = e;
}
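
Here the bare LOG.debug(e) cannot survive the migration: commons-logging's debug(Object) accepted a Throwable directly, while SLF4J's first parameter is a String, so the patch passes e.toString(). Worth knowing that this logs only the exception's message line; keeping the stack trace would need the two-argument form, as sketched (alternative message text is illustrative):

    } catch (ClassNotFoundException e) {
      if (LOG.isDebugEnabled()) {
        // as committed: message only, no stack trace
        LOG.debug(e.toString());
        // alternative (not what the patch does): message plus stack trace
        // LOG.debug("falling through to parent classloader", e);
      }
      ex = e;
    }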
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java
index df15166..8e48cb9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java
@@ -26,10 +26,10 @@ import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/*
* This class is a container of multiple thread pools, each for a volume,
@@ -43,7 +43,8 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Unstable
public class AsyncDiskService {
- public static final Log LOG = LogFactory.getLog(AsyncDiskService.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(AsyncDiskService.class);
// ThreadPool core pool size
private static final int CORE_THREADS_PER_VOLUME = 1;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPWhiteList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPWhiteList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPWhiteList.java
index d12c4c1..6d42dc0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPWhiteList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPWhiteList.java
@@ -17,12 +17,13 @@
*/
package org.apache.hadoop.util;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class CombinedIPWhiteList implements IPList {
- public static final Log LOG = LogFactory.getLog(CombinedIPWhiteList.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(CombinedIPWhiteList.class);
private static final String LOCALHOST_IP = "127.0.0.1";
private final IPList[] networkLists;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
index 6ee1212..146f65c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.util;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
@@ -29,9 +32,6 @@ import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
/**
* FileBasedIPList loads a list of subnets in CIDR format and ip addresses from
* a file.
@@ -43,7 +43,8 @@ import org.apache.commons.logging.LogFactory;
*/
public class FileBasedIPList implements IPList {
- private static final Log LOG = LogFactory.getLog(FileBasedIPList.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(FileBasedIPList.class);
private final String fileName;
private final MachineList addressList;
@@ -107,7 +108,7 @@ public class FileBasedIPList implements IPList {
}
}
} catch (IOException ioe) {
- LOG.error(ioe);
+ LOG.error(ioe.toString());
throw ioe;
}
return null;
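
The same substitution appears in FileBasedIPList, and again the stack trace is dropped (harmless here, since the exception is rethrown on the next line). Once on SLF4J, the parameterized form is the idiomatic way to log context plus a throwable without string concatenation; a sketch using the class's fileName field (the message text is illustrative):

    } catch (IOException ioe) {
      // {} placeholders are formatted lazily; a trailing Throwable after the
      // placeholder arguments is logged with its stack trace (SLF4J >= 1.6)
      LOG.error("Error reading IP list from {}", fileName, ioe);
      throw ioe;
    }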
[29/50] [abbrv] hadoop git commit: YARN-6706. Refactor ContainerScheduler to make oversubscription change easier. (Haibo Chen via asuresh)
Posted by xy...@apache.org.
YARN-6706. Refactor ContainerScheduler to make oversubscription change easier. (Haibo Chen via asuresh)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b007921
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b007921
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b007921
Branch: refs/heads/HDFS-7240
Commit: 5b007921cdf01ecc8ed97c164b7d327b8304c529
Parents: ed27f2b
Author: Arun Suresh <as...@apache.org>
Authored: Mon Jul 17 14:07:23 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Mon Jul 17 14:11:14 2017 -0700
----------------------------------------------------------------------
.../scheduler/ContainerScheduler.java | 135 +++++++++++++------
.../TestContainerManagerRecovery.java | 2 +-
.../TestContainerSchedulerQueuing.java | 85 ++++++++++++
3 files changed, 177 insertions(+), 45 deletions(-)
----------------------------------------------------------------------
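The diff below is dense because scheduleContainer is split into tryStartContainer and enqueueContainer; the net behavior change is small but real: the opportunistic-queue bound tightens from <= maxOppQueueLength to < maxOppQueueLength, so the queue now holds at most maxOppQueueLength entries. A toy model of the extracted enqueue decision (all types here are stand-ins, not the YARN classes):

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Toy model of enqueueContainer's decision flow: guaranteed work always
    // queues; opportunistic work queues only while the bounded queue has room,
    // otherwise the real code sends KILLED_BY_CONTAINER_SCHEDULER.
    public class EnqueueSketch {
      enum ExecutionType { GUARANTEED, OPPORTUNISTIC }

      private final Deque<String> queuedGuaranteed = new ArrayDeque<>();
      private final Deque<String> queuedOpportunistic = new ArrayDeque<>();
      private final int maxOppQueueLength;

      EnqueueSketch(int maxOppQueueLength) {
        this.maxOppQueueLength = maxOppQueueLength;
      }

      /** Returns true if the container was queued, mirroring enqueueContainer. */
      boolean enqueue(String containerId, ExecutionType type) {
        if (type == ExecutionType.GUARANTEED) {
          queuedGuaranteed.add(containerId);
          return true;
        }
        if (queuedOpportunistic.size() < maxOppQueueLength) { // was <= before the patch
          queuedOpportunistic.add(containerId);
          return true;
        }
        return false; // queue full: the caller kills the container
      }
    }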
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b007921/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 24530b3..19243ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -192,7 +192,9 @@ public class ContainerScheduler extends AbstractService implements
// decrement only if it was a running container
Container completedContainer = runningContainers.remove(container
.getContainerId());
- if (completedContainer != null) {
+ // only a running container releases resources upon completion
+ boolean resourceReleased = completedContainer != null;
+ if (resourceReleased) {
this.utilizationTracker.subtractContainerResource(container);
if (container.getContainerTokenIdentifier().getExecutionType() ==
ExecutionType.OPPORTUNISTIC) {
@@ -218,8 +220,7 @@ public class ContainerScheduler extends AbstractService implements
boolean resourcesAvailable = true;
while (cIter.hasNext() && resourcesAvailable) {
Container container = cIter.next();
- if (this.utilizationTracker.hasResourcesAvailable(container)) {
- startAllocatedContainer(container);
+ if (tryStartContainer(container)) {
cIter.remove();
} else {
resourcesAvailable = false;
@@ -228,50 +229,95 @@ public class ContainerScheduler extends AbstractService implements
return resourcesAvailable;
}
- @VisibleForTesting
- protected void scheduleContainer(Container container) {
- if (maxOppQueueLength <= 0) {
- startAllocatedContainer(container);
- return;
+ private boolean tryStartContainer(Container container) {
+ boolean containerStarted = false;
+ if (resourceAvailableToStartContainer(container)) {
+ startContainer(container);
+ containerStarted = true;
}
- if (queuedGuaranteedContainers.isEmpty() &&
- queuedOpportunisticContainers.isEmpty() &&
- this.utilizationTracker.hasResourcesAvailable(container)) {
- startAllocatedContainer(container);
+ return containerStarted;
+ }
+
+ /**
+ * Check if there is resource available to start a given container
+ * immediately. (This can be extended to include overallocated resources)
+ * @param container the container to start
+ * @return true if container can be launched directly
+ */
+ private boolean resourceAvailableToStartContainer(Container container) {
+ return this.utilizationTracker.hasResourcesAvailable(container);
+ }
+
+ private boolean enqueueContainer(Container container) {
+ boolean isGuaranteedContainer = container.getContainerTokenIdentifier().
+ getExecutionType() == ExecutionType.GUARANTEED;
+
+ boolean isQueued;
+ if (isGuaranteedContainer) {
+ queuedGuaranteedContainers.put(container.getContainerId(), container);
+ isQueued = true;
} else {
- LOG.info("No available resources for container {} to start its execution "
- + "immediately.", container.getContainerId());
- boolean isQueued = true;
- if (container.getContainerTokenIdentifier().getExecutionType() ==
- ExecutionType.GUARANTEED) {
- queuedGuaranteedContainers.put(container.getContainerId(), container);
- // Kill running opportunistic containers to make space for
- // guaranteed container.
- killOpportunisticContainers(container);
+ if (queuedOpportunisticContainers.size() < maxOppQueueLength) {
+ LOG.info("Opportunistic container {} will be queued at the NM.",
+ container.getContainerId());
+ queuedOpportunisticContainers.put(
+ container.getContainerId(), container);
+ isQueued = true;
} else {
- if (queuedOpportunisticContainers.size() <= maxOppQueueLength) {
- LOG.info("Opportunistic container {} will be queued at the NM.",
- container.getContainerId());
- queuedOpportunisticContainers.put(
- container.getContainerId(), container);
- } else {
- isQueued = false;
- LOG.info("Opportunistic container [{}] will not be queued at the NM" +
- "since max queue length [{}] has been reached",
- container.getContainerId(), maxOppQueueLength);
- container.sendKillEvent(
- ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER,
- "Opportunistic container queue is full.");
- }
+ LOG.info("Opportunistic container [{}] will not be queued at the NM" +
+ "since max queue length [{}] has been reached",
+ container.getContainerId(), maxOppQueueLength);
+ container.sendKillEvent(
+ ContainerExitStatus.KILLED_BY_CONTAINER_SCHEDULER,
+ "Opportunistic container queue is full.");
+ isQueued = false;
}
- if (isQueued) {
- try {
- this.context.getNMStateStore().storeContainerQueued(
- container.getContainerId());
- } catch (IOException e) {
- LOG.warn("Could not store container [" + container.getContainerId()
- + "] state. The Container has been queued.", e);
- }
+ }
+
+ if (isQueued) {
+ try {
+ this.context.getNMStateStore().storeContainerQueued(
+ container.getContainerId());
+ } catch (IOException e) {
+ LOG.warn("Could not store container [" + container.getContainerId()
+ + "] state. The Container has been queued.", e);
+ }
+ }
+
+ return isQueued;
+ }
+
+ @VisibleForTesting
+ protected void scheduleContainer(Container container) {
+ boolean isGuaranteedContainer = container.getContainerTokenIdentifier().
+ getExecutionType() == ExecutionType.GUARANTEED;
+
+ // Given a guaranteed container, we enqueue it first and then try to start
+ // as many queued guaranteed containers as possible, followed by queued
+ // opportunistic containers, based on the remaining resources available. If
+ // the container still stays in the queue afterwards, we need to preempt
+ // just enough opportunistic containers to make room for it.
+ if (isGuaranteedContainer) {
+ enqueueContainer(container);
+ startPendingContainers();
+
+ // if the guaranteed container is still queued, we need to preempt
+ // opportunistic containers to make room for it
+ if (queuedGuaranteedContainers.containsKey(container.getContainerId())) {
+ killOpportunisticContainers(container);
+ }
+ } else {
+ // Given an opportunistic container, we first try to start as many queued
+ // guaranteed containers as possible, followed by queued opportunistic
+ // containers, based on the remaining resources available, and then enqueue
+ // the new opportunistic container. If the container is enqueued, we do
+ // another pass to try to start the newly enqueued opportunistic container.
+ startPendingContainers();
+ boolean containerQueued = enqueueContainer(container);
+ // The container may not get queued because the max opportunistic container
+ // queue length has been reached. If so, there is no point in doing another pass.
+ if (containerQueued) {
+ startPendingContainers();
}
}
}
@@ -292,7 +338,7 @@ public class ContainerScheduler extends AbstractService implements
}
}
- private void startAllocatedContainer(Container container) {
+ private void startContainer(Container container) {
LOG.info("Starting container [" + container.getContainerId()+ "]");
runningContainers.put(container.getContainerId(), container);
this.utilizationTracker.addContainerResources(container);
@@ -416,4 +462,5 @@ public class ContainerScheduler extends AbstractService implements
public ContainersMonitor getContainersMonitor() {
return this.context.getContainerManager().getContainersMonitor();
}
+
}
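The refactor above splits the old scheduleContainer() into enqueueContainer(), startPendingContainers(), and tryStartContainer(). The resulting control flow can be sketched as a standalone toy (simplified stand-in types, a single-dimension resource, and a print where the real code kills opportunistic containers; this is not the committed YARN code):
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
// Minimal sketch of the refactored flow: enqueue first, then drain queues.
public class SchedulerFlowSketch {
  enum ExecType { GUARANTEED, OPPORTUNISTIC }
  static final class Container {
    final int id; final ExecType type; final int resource;
    Container(int id, ExecType type, int resource) {
      this.id = id; this.type = type; this.resource = resource;
    }
  }
  private static final int CAPACITY = 4;
  private static final int MAX_OPP_QUEUE_LENGTH = 2;
  private int used = 0;
  private final Map<Integer, Container> queuedGuaranteed = new LinkedHashMap<>();
  private final Map<Integer, Container> queuedOpportunistic = new LinkedHashMap<>();
  void scheduleContainer(Container c) {
    if (c.type == ExecType.GUARANTEED) {
      // Enqueue first, then start as many queued containers as resources allow.
      queuedGuaranteed.put(c.id, c);
      startPendingContainers();
      if (queuedGuaranteed.containsKey(c.id)) {
        // Still queued: this is where opportunistic containers would be killed.
        System.out.println("would preempt opportunistic containers for " + c.id);
      }
    } else {
      // Drain the queues first, then enqueue; a second pass may start it.
      startPendingContainers();
      if (enqueueOpportunistic(c)) {
        startPendingContainers();
      }
    }
  }
  private boolean enqueueOpportunistic(Container c) {
    if (queuedOpportunistic.size() < MAX_OPP_QUEUE_LENGTH) {
      queuedOpportunistic.put(c.id, c);
      return true;
    }
    System.out.println("opportunistic queue full, killing " + c.id);
    return false;
  }
  private void startPendingContainers() {
    startFromQueue(queuedGuaranteed);
    if (queuedGuaranteed.isEmpty()) {
      startFromQueue(queuedOpportunistic);
    }
  }
  private void startFromQueue(Map<Integer, Container> queue) {
    Iterator<Container> it = queue.values().iterator();
    while (it.hasNext()) {
      Container c = it.next();
      if (used + c.resource > CAPACITY) {
        break;  // FIFO: stop at the first container that does not fit
      }
      used += c.resource;
      it.remove();
      System.out.println("started " + c.id);
    }
  }
  public static void main(String[] args) {
    SchedulerFlowSketch s = new SchedulerFlowSketch();
    s.scheduleContainer(new Container(0, ExecType.GUARANTEED, 4));    // starts
    s.scheduleContainer(new Container(1, ExecType.OPPORTUNISTIC, 2)); // queued
    s.scheduleContainer(new Container(2, ExecType.GUARANTEED, 2));    // stays queued
  }
}
Running it starts container 0, queues container 1, and leaves container 2 queued, which is exactly the case where the scheduler would preempt opportunistic containers.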
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b007921/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index 075d857..b1a7b4b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -583,7 +583,7 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
@Override
public long getPmemAllocatedForContainers() {
- return 10240;
+ return (long) 2048 << 20;
}
@Override
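The hunk above changes the mocked physical-memory figure from a bare 10240 to a shifted value; shifting left by 20 multiplies by 2^20, so the mock now reports 2048 MiB expressed in bytes (2 GiB), presumably to match the byte-based unit the scheduler's utilization tracking expects. The cast matters (a tiny standalone illustration, not the test itself):
public class PmemUnits {
  public static void main(String[] args) {
    // Cast before shifting: as a plain int, 2048 << 20 is 2^31 and overflows.
    long pmemBytes = (long) 2048 << 20;  // 2048 * 2^20 bytes = 2 GiB
    System.out.println(pmemBytes);       // prints 2147483648
  }
}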
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b007921/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index 8264f2e..aeba399 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -332,6 +332,91 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
}
/**
+ * Starts one GUARANTEED container that takes up the whole node's resources,
+ * and submits more OPPORTUNISTIC containers than the opportunistic container
+ * queue can hold. OPPORTUNISTIC containers that cannot be queued should be
+ * killed.
+ * @throws Exception
+ */
+ @Test
+ public void testStartOpportunisticsWhenOppQueueIsFull() throws Exception {
+ containerManager.start();
+
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ List<StartContainerRequest> list = new ArrayList<>();
+ list.add(StartContainerRequest.newInstance(
+ containerLaunchContext,
+ createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
+ context.getNodeId(),
+ user, BuilderUtils.newResource(2048, 1),
+ context.getContainerTokenSecretManager(), null,
+ ExecutionType.GUARANTEED)));
+
+ final int maxOppQueueLength = conf.getInt(
+ YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH,
+ YarnConfiguration.DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH);
+ for (int i = 1; i < maxOppQueueLength + 2; i++) {
+ list.add(StartContainerRequest.newInstance(
+ containerLaunchContext,
+ createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
+ context.getNodeId(),
+ user, BuilderUtils.newResource(2048, 1),
+ context.getContainerTokenSecretManager(), null,
+ ExecutionType.OPPORTUNISTIC)));
+ }
+
+ StartContainersRequest allRequests =
+ StartContainersRequest.newInstance(list);
+ containerManager.startContainers(allRequests);
+
+ BaseContainerManagerTest.waitForNMContainerState(containerManager,
+ createContainerId(0), ContainerState.RUNNING, 40);
+ BaseContainerManagerTest.waitForNMContainerState(containerManager,
+ createContainerId(maxOppQueueLength + 1), ContainerState.DONE,
+ 40);
+ Thread.sleep(5000);
+
+ // Get container statuses. Container 0 should be running and container
+ // 1 to maxOppQueueLength should be queued and the last container should
+ // be killed
+ List<ContainerId> statList = new ArrayList<>();
+ for (int i = 0; i < maxOppQueueLength + 2; i++) {
+ statList.add(createContainerId(i));
+ }
+ GetContainerStatusesRequest statRequest =
+ GetContainerStatusesRequest.newInstance(statList);
+ List<ContainerStatus> containerStatuses = containerManager
+ .getContainerStatuses(statRequest).getContainerStatuses();
+ for (ContainerStatus status : containerStatuses) {
+ if (status.getContainerId().equals(createContainerId(0))) {
+ Assert.assertEquals(
+ org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+ status.getState());
+ } else if (status.getContainerId().equals(createContainerId(
+ maxOppQueueLength + 1))) {
+ Assert.assertTrue(status.getDiagnostics().contains(
+ "Opportunistic container queue is full"));
+ } else {
+ Assert.assertEquals(
+ org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED,
+ status.getState());
+ }
+ System.out.println("\nStatus : [" + status + "]\n");
+ }
+
+ ContainerScheduler containerScheduler =
+ containerManager.getContainerScheduler();
+ Assert.assertEquals(maxOppQueueLength,
+ containerScheduler.getNumQueuedContainers());
+ Assert.assertEquals(0,
+ containerScheduler.getNumQueuedGuaranteedContainers());
+ Assert.assertEquals(maxOppQueueLength,
+ containerScheduler.getNumQueuedOpportunisticContainers());
+ }
+
+ /**
* Submit two OPPORTUNISTIC and one GUARANTEED containers. The resource
* requests by each container are such that only one can run in parallel.
* Thus, the OPPORTUNISTIC container that started running, will be
[32/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging
APIs over to slf4j in hadoop-common. Contributed by Wenxin He.
Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
index e4a8d0f..fbc1418 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.util;
import java.util.Collection;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link GSet} is a set,
@@ -35,7 +35,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
*/
@InterfaceAudience.Private
public interface GSet<K, E extends K> extends Iterable<E> {
- static final Log LOG = LogFactory.getLog(GSet.class);
+ Logger LOG = LoggerFactory.getLogger(GSet.class);
/**
* @return The size of this set.
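The hunks in this commit all follow the same mechanical pattern: swap the commons-logging imports for org.slf4j, and replace LogFactory.getLog(...) with LoggerFactory.getLogger(...). A minimal sketch of the resulting idiom (assuming slf4j-api, plus a binding, on the classpath; the class name is illustrative):
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MigrationSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(MigrationSketch.class);
  public static void main(String[] args) {
    // slf4j's {} placeholders defer string formatting until the level is
    // enabled, so many isDebugEnabled() guards become unnecessary.
    LOG.info("container {} queued at position {}", "c1", 3);
  }
}
Note that in interface bodies such as GSet above, the logger field loses its explicit modifiers after the migration, since interface fields are implicitly public static final.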
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 835206a..ac9776f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -45,6 +43,8 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <code>GenericOptionsParser</code> is a utility to parse command line
@@ -113,7 +113,8 @@ import org.apache.hadoop.security.UserGroupInformation;
@InterfaceStability.Evolving
public class GenericOptionsParser {
- private static final Log LOG = LogFactory.getLog(GenericOptionsParser.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(GenericOptionsParser.class);
private Configuration conf;
private CommandLine commandLine;
private final boolean parseSuccessful;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index 340f792..67b0247 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -31,11 +31,11 @@ import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
@@ -47,7 +47,8 @@ import org.xml.sax.SAXException;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class HostsFileReader {
- private static final Log LOG = LogFactory.getLog(HostsFileReader.class);
+ private static final Logger LOG = LoggerFactory.getLogger(HostsFileReader
+ .class);
private final AtomicReference<HostDetails> current;
@@ -171,7 +172,7 @@ public class HostsFileReader {
}
}
} catch (IOException|SAXException|ParserConfigurationException e) {
- LOG.fatal("error parsing " + filename, e);
+ LOG.error("error parsing " + filename, e);
throw new RuntimeException(e);
} finally {
fileInputStream.close();
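One behavioral wrinkle in the hunk above: slf4j's Logger interface deliberately has no fatal() method (the slf4j FAQ argues FATAL adds nothing over ERROR), so the LOG.fatal(...) call site maps to LOG.error(...); the RuntimeException that follows still propagates as before.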
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java
index 0512d4a..1ffb7db 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java
@@ -21,11 +21,11 @@ import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Implements an intrusive doubly-linked list.
@@ -298,7 +298,8 @@ public class IntrusiveCollection<E extends IntrusiveCollection.Element>
return true;
}
- public static final Log LOG = LogFactory.getLog(IntrusiveCollection.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(IntrusiveCollection.class);
@Override
public boolean remove(Object o) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index 80d4468..420ac8b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -24,8 +24,6 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
@@ -35,6 +33,8 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Class which sets up a simple thread which runs in a loop sleeping
@@ -45,7 +45,7 @@ import com.google.common.collect.Sets;
*/
@InterfaceAudience.Private
public class JvmPauseMonitor extends AbstractService {
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
JvmPauseMonitor.class);
/** The target sleep time */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java
index 2e6c079..b01330f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java
@@ -26,12 +26,12 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.net.InetAddresses;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Container class which holds a list of ip/host addresses and
@@ -43,7 +43,7 @@ import com.google.common.net.InetAddresses;
public class MachineList {
- public static final Log LOG = LogFactory.getLog(MachineList.class);
+ public static final Logger LOG = LoggerFactory.getLogger(MachineList.class);
public static final String WILDCARD_VALUE = "*";
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
index c381336..a8a380e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.util;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A helper to load the native hadoop code i.e. libhadoop.so.
@@ -33,8 +33,8 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Unstable
public final class NativeCodeLoader {
- private static final Log LOG =
- LogFactory.getLog(NativeCodeLoader.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(NativeCodeLoader.class);
private static boolean nativeCodeLoaded = false;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
index fc392c4..cf1e460 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
@@ -25,8 +25,6 @@ import java.util.Arrays;
import java.util.Timer;
import java.util.TimerTask;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.service.AbstractService;
@@ -34,6 +32,8 @@ import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
*
@@ -43,7 +43,8 @@ import org.apache.hadoop.util.StringUtils;
*/
public class NodeHealthScriptRunner extends AbstractService {
- private static Log LOG = LogFactory.getLog(NodeHealthScriptRunner.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(NodeHealthScriptRunner.class);
/** Absolute path to the health script. */
private String nodeHealthScript;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java
index 3fbc935..bd1c0f4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java
@@ -20,10 +20,10 @@ package org.apache.hadoop.util;
import java.util.ArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** Utility to assist with generation of progress reports. Applications build
* a hierarchy of {@link Progress} instances, each modelling a phase of
@@ -33,7 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class Progress {
- private static final Log LOG = LogFactory.getLog(Progress.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Progress.class);
private String status = "";
private float progress;
private int currentPhase;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index 81983f0..153f92b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.util;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
@@ -47,7 +47,8 @@ public class ShutdownHookManager {
private static final ShutdownHookManager MGR = new ShutdownHookManager();
- private static final Log LOG = LogFactory.getLog(ShutdownHookManager.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ShutdownHookManager.class);
private static final long TIMEOUT_DEFAULT = 10;
private static final TimeUnit TIME_UNIT_DEFAULT = TimeUnit.SECONDS;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java
index ffd88fb..5405d77 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.util;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
@@ -29,7 +29,8 @@ import java.util.concurrent.TimeUnit;
* Helper class to shutdown {@link Thread}s and {@link ExecutorService}s.
*/
public class ShutdownThreadsHelper {
- private static Log LOG = LogFactory.getLog(ShutdownThreadsHelper.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ShutdownThreadsHelper.class);
@VisibleForTesting
static final int SHUTDOWN_WAIT_MS = 3000;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
index bba1631..7fd1990 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
@@ -32,11 +32,11 @@ import java.util.regex.Pattern;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Plugin to calculate resource information on Linux systems.
@@ -44,8 +44,8 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SysInfoLinux extends SysInfo {
- private static final Log LOG =
- LogFactory.getLog(SysInfoLinux.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SysInfoLinux.class);
/**
* proc's meminfo virtual file has keys-values in the format
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
index bce2d6d..e894014 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
@@ -21,11 +21,11 @@ import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Plugin to calculate resource information on Windows systems.
@@ -34,7 +34,8 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
@InterfaceStability.Evolving
public class SysInfoWindows extends SysInfo {
- private static final Log LOG = LogFactory.getLog(SysInfoWindows.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SysInfoWindows.class);
private long vmemSize;
private long memSize;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
index ab7b5fd..6444428 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
@@ -17,10 +17,9 @@
*/
package org.apache.hadoop.util;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
@@ -28,7 +27,7 @@ import java.io.InputStream;
@InterfaceStability.Evolving
public class ThreadUtil {
- private static final Log LOG = LogFactory.getLog(ThreadUtil.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ThreadUtil.class);
/**
* Cause the current thread to sleep as close as possible to the provided
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
index 10e2590..ca09050 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
@@ -22,11 +22,11 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class returns build information about Hadoop components.
@@ -34,7 +34,7 @@ import org.apache.hadoop.io.IOUtils;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class VersionInfo {
- private static final Log LOG = LogFactory.getLog(VersionInfo.class);
+ private static final Logger LOG = LoggerFactory.getLogger(VersionInfo.class);
private Properties info;
@@ -46,7 +46,7 @@ public class VersionInfo {
is = ThreadUtil.getResourceAsStream(versionInfoFile);
info.load(is);
} catch (IOException ex) {
- LogFactory.getLog(getClass()).warn("Could not read '" +
+ LoggerFactory.getLogger(getClass()).warn("Could not read '" +
versionInfoFile + "', " + ex.toString(), ex);
} finally {
IOUtils.closeStream(is);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java
index d687867..61eb777 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.util.concurrent;
import com.google.common.util.concurrent.AbstractFuture;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
@@ -29,7 +29,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
/** A {@link Future} implemented using an {@link AsyncGet} object. */
public class AsyncGetFuture<T, E extends Throwable> extends AbstractFuture<T> {
- public static final Log LOG = LogFactory.getLog(AsyncGetFuture.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(AsyncGetFuture.class);
private final AtomicBoolean called = new AtomicBoolean(false);
private final AsyncGet<T, E> asyncGet;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/ExecutorHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/ExecutorHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/ExecutorHelper.java
index 3bc9ed9..02452a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/ExecutorHelper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/ExecutorHelper.java
@@ -20,8 +20,8 @@
package org.apache.hadoop.util.concurrent;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
@@ -29,8 +29,8 @@ import java.util.concurrent.Future;
/** Helper functions for Executors. */
public final class ExecutorHelper {
- private static final Log LOG = LogFactory
- .getLog(ExecutorHelper.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(ExecutorHelper.class);
static void logThrowableFromAfterExecute(Runnable r, Throwable t) {
if (LOG.isDebugEnabled()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java
index 8d910b6..78e729b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopScheduledThreadPoolExecutor.java
@@ -20,8 +20,8 @@
package org.apache.hadoop.util.concurrent;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -32,8 +32,8 @@ import java.util.concurrent.ThreadFactory;
public class HadoopScheduledThreadPoolExecutor extends
ScheduledThreadPoolExecutor {
- private static final Log LOG = LogFactory
- .getLog(HadoopScheduledThreadPoolExecutor.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(HadoopScheduledThreadPoolExecutor.class);
public HadoopScheduledThreadPoolExecutor(int corePoolSize) {
super(corePoolSize);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java
index bcf26cb..fa845b7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopThreadPoolExecutor.java
@@ -20,8 +20,8 @@
package org.apache.hadoop.util.concurrent;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
@@ -34,8 +34,8 @@ import java.util.concurrent.TimeUnit;
* */
public final class HadoopThreadPoolExecutor extends ThreadPoolExecutor {
- private static final Log LOG = LogFactory
- .getLog(HadoopThreadPoolExecutor.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(HadoopThreadPoolExecutor.class);
public HadoopThreadPoolExecutor(int corePoolSize,
int maximumPoolSize,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
index f9c8c16..9183524 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -26,8 +26,6 @@ import java.nio.ByteOrder;
import java.util.EnumSet;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSExceptionMessages;
@@ -43,9 +41,11 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class CryptoStreamsTestBase {
- protected static final Log LOG = LogFactory.getLog(
+ protected static final Logger LOG = LoggerFactory.getLogger(
CryptoStreamsTestBase.class);
protected static CryptoCodec codec;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
index 52e547b..eca23a7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
@@ -31,8 +31,6 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
@@ -46,9 +44,12 @@ import org.junit.Before;
import org.junit.Test;
import com.google.common.primitives.Longs;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestCryptoCodec {
- private static final Log LOG= LogFactory.getLog(TestCryptoCodec.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestCryptoCodec
+ .class);
private static byte[] key = new byte[16];
private static byte[] iv = new byte[16];
private static final int bufferSize = 4096;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
index 2e208d2..73fd280 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
@@ -32,8 +32,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
@@ -41,6 +39,8 @@ import org.junit.Test;
import com.google.common.base.Supplier;
import com.google.common.util.concurrent.Uninterruptibles;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <p>
@@ -48,7 +48,8 @@ import com.google.common.util.concurrent.Uninterruptibles;
* </p>
*/
public abstract class FCStatisticsBaseTest {
- private static final Log LOG = LogFactory.getLog(FCStatisticsBaseTest.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FCStatisticsBaseTest
+ .class);
static protected int blockSize = 512;
static protected int numBlocks = 1;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
index 584ca40..f5fb06f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
@@ -17,15 +17,16 @@
*/
package org.apache.hadoop.fs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.fail;
public class TestFileContext {
- private static final Log LOG = LogFactory.getLog(TestFileContext.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestFileContext
+ .class);
@Test
public void testDefaultURIWithoutScheme() throws Exception {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
index d29b1a4..1962f49 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
@@ -33,16 +33,16 @@ import java.util.Collections;
import java.util.List;
import org.junit.Test;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestFileStatus {
- private static final Log LOG =
- LogFactory.getLog(TestFileStatus.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestFileStatus.class);
/** Values for creating {@link FileStatus} in some tests */
static final int LENGTH = 1;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 3c733d2..0ad03fc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -47,8 +47,6 @@ import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
@@ -59,9 +57,11 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestFileUtil {
- private static final Log LOG = LogFactory.getLog(TestFileUtil.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestFileUtil.class);
private static final File TEST_DIR = GenericTestUtils.getTestDir("fu");
private static final String FILE = "x";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
index 1db72d2..f9b2420 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
@@ -31,8 +31,6 @@ import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.GenericTestUtils;
@@ -40,9 +38,11 @@ import org.apache.hadoop.util.StringUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestFsShellCopy {
- static final Log LOG = LogFactory.getLog(TestFsShellCopy.class);
+ static final Logger LOG = LoggerFactory.getLogger(TestFsShellCopy.class);
static Configuration conf;
static FsShell shell;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
index 38ad65b..77b2f44 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
@@ -33,8 +33,6 @@ import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.FsCommand;
@@ -46,14 +44,16 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This test validates that chmod, chown, chgrp returning correct exit codes
*
*/
public class TestFsShellReturnCode {
- private static final Log LOG = LogFactory
- .getLog("org.apache.hadoop.fs.TestFsShellReturnCode");
+ private static final Logger LOG = LoggerFactory
+ .getLogger("org.apache.hadoop.fs.TestFsShellReturnCode");
private static final Configuration conf = new Configuration();
private static FileSystem fileSys;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
index 89c886e..5fe4e39 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellTouch.java
@@ -21,17 +21,17 @@ import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertThat;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestFsShellTouch {
- static final Log LOG = LogFactory.getLog(TestFsShellTouch.class);
+ static final Logger LOG = LoggerFactory.getLogger(TestFsShellTouch.class);
static FsShell shell;
static LocalFileSystem lfs;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index bacdbb7..a1aa4de 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.fs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
@@ -30,6 +28,8 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.reflect.Method;
@@ -48,7 +48,8 @@ import static org.junit.Assert.assertTrue;
@SuppressWarnings("deprecation")
public class TestHarFileSystem {
- public static final Log LOG = LogFactory.getLog(TestHarFileSystem.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestHarFileSystem.class);
/**
* FileSystem methods that must not be overwritten by
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java
index e7766f3..3d202df 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.fs.contract;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
@@ -39,8 +39,8 @@ import java.net.URISyntaxException;
*/
public abstract class AbstractBondedFSContract extends AbstractFSContract {
- private static final Log LOG =
- LogFactory.getLog(AbstractBondedFSContract.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractBondedFSContract.class);
/**
* Pattern for the option for test filesystems from schema
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
index ca01702..6da5182 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java
@@ -32,8 +32,6 @@ import java.util.Arrays;
import java.util.EnumSet;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CreateFlag;
@@ -49,6 +47,8 @@ import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** The load generator is a tool for testing NameNode behavior under
* different client loads. Note there is a subclass of this class that lets
@@ -129,7 +129,7 @@ import com.google.common.base.Preconditions;
* -scriptFile <file name>: text file to parse for scripted operation
*/
public class LoadGenerator extends Configured implements Tool {
- public static final Log LOG = LogFactory.getLog(LoadGenerator.class);
+ public static final Logger LOG = LoggerFactory.getLogger(LoadGenerator.class);
private volatile static boolean shouldRun = true;
protected static Path root = DataGenerator.DEFAULT_ROOT;
@@ -341,7 +341,7 @@ public class LoadGenerator extends Configured implements Tool {
executionTime[WRITE_CLOSE] += (Time.now() - startTime);
numOfOps[WRITE_CLOSE]++;
} finally {
- IOUtils.cleanup(LOG, out);
+ IOUtils.cleanupWithLogger(LOG, out);
}
}
}
@@ -651,7 +651,7 @@ public class LoadGenerator extends Configured implements Tool {
System.err.println("Line: " + lineNum + ", " + e.getMessage());
return -1;
} finally {
- IOUtils.cleanup(LOG, br);
+ IOUtils.cleanupWithLogger(LOG, br);
}
// Copy vectors to arrays of values, to avoid autoboxing overhead later
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
index 2375081..764ad2e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
@@ -19,18 +19,18 @@ package org.apache.hadoop.ha;
import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.ZooKeeperServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class ActiveStandbyElectorTestUtil {
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
ActiveStandbyElectorTestUtil.class);
private static final long LOG_INTERVAL_MS = 500;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
index 551da56..6f01be8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
@@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
import java.util.ArrayList;
import com.google.protobuf.BlockingService;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
@@ -38,6 +36,8 @@ import org.apache.hadoop.security.AccessControlException;
import org.mockito.Mockito;
import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HA_HM_RPC_TIMEOUT_DEFAULT;
@@ -46,7 +46,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HA_HM_RPC_TIMEOUT_DEF
* a mock implementation.
*/
class DummyHAService extends HAServiceTarget {
- public static final Log LOG = LogFactory.getLog(DummyHAService.class);
+ public static final Logger LOG = LoggerFactory.getLogger(DummyHAService
+ .class);
private static final String DUMMY_FENCE_KEY = "dummy.fence.key";
volatile HAServiceState state;
HAServiceProtocol proxy, healthMonitorProxy;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
index a5fbe8f..9146e01 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
@@ -25,8 +25,6 @@ import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -41,6 +39,8 @@ import org.apache.zookeeper.server.ZooKeeperServer;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Harness for starting two dummy ZK FailoverControllers, associated with
@@ -57,7 +57,8 @@ public class MiniZKFCCluster {
private DummySharedResource sharedResource = new DummySharedResource();
- private static final Log LOG = LogFactory.getLog(MiniZKFCCluster.class);
+ private static final Logger LOG = LoggerFactory.getLogger(MiniZKFCCluster
+ .class);
public MiniZKFCCluster(Configuration conf, ZooKeeperServer zks) {
this.conf = conf;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
index 1d8f48e..0e59aa1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
@@ -24,8 +24,6 @@ import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -34,9 +32,11 @@ import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestHAAdmin {
- private static final Log LOG = LogFactory.getLog(TestHAAdmin.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestHAAdmin.class);
private HAAdmin tool;
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java
index 6c46543..8738372 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java
@@ -23,8 +23,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -34,9 +32,11 @@ import org.apache.hadoop.util.Time;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestHealthMonitor {
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
TestHealthMonitor.class);
/** How many times has createProxy been called */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
index 0e4a1ca..7036175 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
@@ -33,14 +33,14 @@ import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestGlobalFilter extends HttpServerFunctionalTest {
- static final Log LOG = LogFactory.getLog(HttpServer2.class);
+ static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
static final Set<String> RECORDS = new TreeSet<String>();
/** A very simple filter that records accessed uri's */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index baa6f91..6ec6e0f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.http;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -40,6 +38,8 @@ import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
@@ -73,7 +73,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_S
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
public class TestHttpServer extends HttpServerFunctionalTest {
- static final Log LOG = LogFactory.getLog(TestHttpServer.class);
+ static final Logger LOG = LoggerFactory.getLogger(TestHttpServer.class);
private static HttpServer2 server;
private static final int MAX_THREADS = 10;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLogs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLogs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLogs.java
index d72a958..afd06ac 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLogs.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLogs.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.http;
import org.apache.http.HttpStatus;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.http.resource.JerseyResource;
@@ -27,12 +25,14 @@ import org.apache.hadoop.net.NetUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.net.HttpURLConnection;
import java.net.URL;
public class TestHttpServerLogs extends HttpServerFunctionalTest {
- static final Log LOG = LogFactory.getLog(TestHttpServerLogs.class);
+ static final Logger LOG = LoggerFactory.getLogger(TestHttpServerLogs.class);
private static HttpServer2 server;
@BeforeClass
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java
index c92944e..07dbc2a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.http;
import org.junit.Test;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
@@ -28,7 +28,8 @@ import java.io.FileNotFoundException;
* Test webapp loading
*/
public class TestHttpServerWebapps extends HttpServerFunctionalTest {
- private static final Log log = LogFactory.getLog(TestHttpServerWebapps.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestHttpServerWebapps.class);
/**
* Test that the test server is loadable on the classpath
@@ -58,7 +59,7 @@ public class TestHttpServerWebapps extends HttpServerFunctionalTest {
stop(server);
fail("Expected an exception, got " + serverDescription);
} catch (FileNotFoundException expected) {
- log.debug("Expected exception " + expected, expected);
+ LOG.debug("Expected exception " + expected, expected);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
index 3d3e020..5239ed6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.http;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.minikdc.MiniKdc;
@@ -38,6 +36,8 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileWriter;
@@ -53,7 +53,8 @@ import static org.junit.Assert.assertTrue;
*/
public class TestHttpServerWithSpengo {
- static final Log LOG = LogFactory.getLog(TestHttpServerWithSpengo.class);
+ static final Logger LOG =
+ LoggerFactory.getLogger(TestHttpServerWithSpengo.class);
private static final String SECRET_STR = "secret";
private static final String HTTP_USER = "HTTP";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
index 09f31df..4c35b39 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
@@ -33,14 +33,14 @@ import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestPathFilter extends HttpServerFunctionalTest {
- static final Log LOG = LogFactory.getLog(HttpServer2.class);
+ static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
static final Set<String> RECORDS = new TreeSet<String>();
/** A very simple filter that records accessed uri's */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
index 30aca57..5af6d6f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -32,8 +32,6 @@ import javax.net.ssl.SSLHandshakeException;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.IOUtils;
@@ -45,6 +43,8 @@ import org.apache.hadoop.util.StringUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This testcase issues SSL certificates and configures the HttpServer to serve
@@ -56,7 +56,8 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
private static final String BASEDIR =
GenericTestUtils.getTempPath(TestSSLHttpServer.class.getSimpleName());
- private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestSSLHttpServer.class);
private static Configuration conf;
private static HttpServer2 server;
private static String keystoresDir;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
index f58c230..eafd0ae 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
@@ -32,15 +32,15 @@ import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestServletFilter extends HttpServerFunctionalTest {
- static final Log LOG = LogFactory.getLog(HttpServer2.class);
+ static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
static volatile String uri = null;
/** A very simple filter which records the uri filtered. */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
index 607d17f..68a10a1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
@@ -30,10 +30,10 @@ import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.http.JettyUtils;
import org.eclipse.jetty.util.ajax.JSON;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A simple Jersey resource class used by TestHttpServer.
@@ -42,7 +42,7 @@ import org.eclipse.jetty.util.ajax.JSON;
*/
@Path("")
public class JerseyResource {
- static final Log LOG = LogFactory.getLog(JerseyResource.class);
+ static final Logger LOG = LoggerFactory.getLogger(JerseyResource.class);
public static final String PATH = "path";
public static final String OP = "op";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java
index 505aca7..722e9de 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java
@@ -21,13 +21,15 @@ package org.apache.hadoop.io;
import java.io.*;
-import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.conf.*;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertEquals;
@@ -37,7 +39,8 @@ import static org.junit.Assert.fail;
/** Support for flat files of binary key/value pairs. */
public class TestArrayFile {
- private static final Log LOG = LogFactory.getLog(TestArrayFile.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestArrayFile.class);
private static final Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
TestMapFile.class.getSimpleName()));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestDefaultStringifier.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestDefaultStringifier.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestDefaultStringifier.java
index bd8f2ef..b70e011 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestDefaultStringifier.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestDefaultStringifier.java
@@ -21,16 +21,18 @@ package org.apache.hadoop.io;
import java.io.IOException;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.junit.Assert.assertEquals;
public class TestDefaultStringifier {
private static Configuration conf = new Configuration();
- private static final Log LOG = LogFactory.getLog(TestDefaultStringifier.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestDefaultStringifier.class);
private char[] alphabet = "abcdefghijklmnopqrstuvwxyz".toCharArray();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
index e97ab6a..0448243 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.io;
import java.io.*;
import java.util.*;
-import org.apache.commons.logging.*;
-
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Metadata;
@@ -40,11 +38,14 @@ import static org.junit.Assert.fail;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** Support for flat files of binary key/value pairs. */
public class TestSequenceFile {
- private static final Log LOG = LogFactory.getLog(TestSequenceFile.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestSequenceFile.class);
private Configuration conf = new Configuration();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java
index 1fcfab6..b6ec487 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java
@@ -21,14 +21,13 @@ package org.apache.hadoop.io;
import java.io.*;
import java.util.*;
-
-import org.apache.commons.logging.*;
-
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
@@ -37,7 +36,7 @@ import static org.junit.Assert.fail;
/** Support for flat files of binary key/value pairs. */
public class TestSetFile {
- private static final Log LOG = LogFactory.getLog(TestSetFile.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestSetFile.class);
private static String FILE = GenericTestUtils.getTempPath("test.set");
private static Configuration conf = new Configuration();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java
index 92fb4ec..57359a0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java
@@ -20,15 +20,16 @@ package org.apache.hadoop.io;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestWritableUtils {
- private static final Log LOG = LogFactory.getLog(TestWritableUtils.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestWritableUtils.class);
private void testValue(int val, int vintlen) throws IOException {
DataOutputBuffer buf = new DataOutputBuffer();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
index 1ea9dc8..133ff9a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
@@ -49,8 +49,6 @@ import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileStatus;
@@ -81,10 +79,12 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestCodec {
- private static final Log LOG= LogFactory.getLog(TestCodec.class);
+ private static final Logger LOG= LoggerFactory.getLogger(TestCodec.class);
private Configuration conf = new Configuration();
private int count = 10000;
@@ -382,7 +382,7 @@ public class TestCodec {
}
LOG.info("Wrote " + seq + " records to " + file);
} finally {
- IOUtils.cleanup(LOG, fout);
+ IOUtils.cleanupWithLogger(LOG, fout);
CodecPool.returnCompressor(cmp);
}
return file;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
index dd7bdd2..d56b4e1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressionStreamReuse.java
@@ -24,8 +24,6 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
@@ -36,12 +34,15 @@ import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
public class TestCompressionStreamReuse {
- private static final Log LOG = LogFactory
- .getLog(TestCompressionStreamReuse.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(TestCompressionStreamReuse.class);
private Configuration conf = new Configuration();
private int count = 10000;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
index 436f10a..6989905 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
@@ -46,8 +46,6 @@ import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FileSystem;
@@ -56,12 +54,14 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.*;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*;
public class TestNativeIO {
- static final Log LOG = LogFactory.getLog(TestNativeIO.class);
+ static final Logger LOG = LoggerFactory.getLogger(TestNativeIO.class);
static final File TEST_DIR = GenericTestUtils.getTestDir("testnativeio");
@@ -619,8 +619,8 @@ public class TestNativeIO {
NativeIO.copyFileUnbuffered(srcFile, dstFile);
Assert.assertEquals(srcFile.length(), dstFile.length());
} finally {
- IOUtils.cleanup(LOG, channel);
- IOUtils.cleanup(LOG, raSrcFile);
+ IOUtils.cleanupWithLogger(LOG, channel);
+ IOUtils.cleanupWithLogger(LOG, raSrcFile);
FileUtils.deleteQuietly(TEST_DIR);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
index 64abecd..fbe3fb8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
@@ -27,14 +27,15 @@ import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestSharedFileDescriptorFactory {
- static final Log LOG = LogFactory.getLog(TestSharedFileDescriptorFactory.class);
+ static final Logger LOG =
+ LoggerFactory.getLogger(TestSharedFileDescriptorFactory.class);
private static final File TEST_BASE = GenericTestUtils.getTestDir();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java
index 3f2802f..64c486c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.ipc;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.LongWritable;
@@ -34,6 +32,8 @@ import org.apache.hadoop.util.concurrent.AsyncGetFuture;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -49,7 +49,7 @@ import static org.junit.Assert.assertFalse;
public class TestAsyncIPC {
private static Configuration conf;
- private static final Log LOG = LogFactory.getLog(TestAsyncIPC.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestAsyncIPC.class);
static <T extends Writable> AsyncGetFuture<T, IOException>
getAsyncRpcResponseFuture() {
@@ -183,7 +183,7 @@ public class TestAsyncIPC {
final long param = TestIPC.RANDOM.nextLong();
runCall(i, param);
} catch (Exception e) {
- LOG.fatal(String.format("Caller-%d Call-%d caught: %s", callerId, i,
+ LOG.error(String.format("Caller-%d Call-%d caught: %s", callerId, i,
StringUtils.stringifyException(e)));
failed = true;
}
@@ -219,7 +219,7 @@ public class TestAsyncIPC {
for (int i = start; i < end; i++) {
LongWritable value = returnFutures.get(i).get();
if (expectedValues.get(i) != value.get()) {
- LOG.fatal(String.format("Caller-%d Call-%d failed!", callerId, i));
+ LOG.error(String.format("Caller-%d Call-%d failed!", callerId, i));
failed = true;
break;
}
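The hunks above all apply the same mechanical substitution. For reference, a minimal sketch of the before/after pattern, using a hypothetical class name Example (the slf4j types and Hadoop's IOUtils.cleanupWithLogger are real APIs; everything else here is illustrative):

    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Example {
      // Before: private static final Log LOG = LogFactory.getLog(Example.class);
      private static final Logger LOG = LoggerFactory.getLogger(Example.class);

      void closeQuietly(java.io.Closeable stream) {
        // SLF4J has no FATAL level, so LOG.fatal(...) calls become LOG.error(...).
        LOG.error("example message");
        // IOUtils.cleanup(Log, ...) is replaced by the slf4j-typed overload.
        IOUtils.cleanupWithLogger(LOG, stream);
      }
    }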
[40/50] [abbrv] hadoop git commit: YARN-6819. Application report
fails if app rejected due to nodesize. Contributed by Bibin A Chundatt.
Posted by xy...@apache.org.
YARN-6819. Application report fails if app rejected due to nodesize. Contributed by Bibin A Chundatt.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/845c4e52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/845c4e52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/845c4e52
Branch: refs/heads/HDFS-7240
Commit: 845c4e52bdd579a24df5dbba7477b0ebf2fa16f1
Parents: daaf530
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Wed Jul 19 11:10:52 2017 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Wed Jul 19 11:10:52 2017 +0530
----------------------------------------------------------------------
.../resourcemanager/recovery/RMStateStore.java | 5 ++--
.../resourcemanager/rmapp/RMAppEvent.java | 24 --------------------
.../resourcemanager/rmapp/RMAppEventType.java | 1 +
.../server/resourcemanager/rmapp/RMAppImpl.java | 8 +++----
.../recovery/TestZKRMStateStore.java | 14 +++++++-----
.../rmapp/TestRMAppTransitions.java | 17 ++++++++++++++
6 files changed, 33 insertions(+), 36 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index e945b59..d0a8cf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -221,8 +221,9 @@ public abstract class RMStateStore extends AbstractService {
} catch (Exception e) {
LOG.error("Error storing app: " + appId, e);
if (e instanceof StoreLimitException) {
- store.notifyApplication(new RMAppEvent(appId,
- RMAppEventType.APP_REJECTED, e.getMessage(), false));
+ store.notifyApplication(
+ new RMAppEvent(appId, RMAppEventType.APP_SAVE_FAILED,
+ e.getMessage()));
} else {
isFenced = store.notifyStoreOperationFailedInternal(e);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
index 0c6139e..5c46945 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
@@ -25,7 +25,6 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
private final ApplicationId appId;
private final String diagnosticMsg;
- private boolean storeAppInfo;
public RMAppEvent(ApplicationId appId, RMAppEventType type) {
this(appId, type, "");
@@ -36,21 +35,6 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
super(type);
this.appId = appId;
this.diagnosticMsg = diagnostic;
- this.storeAppInfo = true;
- }
-
- /**
- * Constructor to create RM Application Event type.
- *
- * @param appId application Id
- * @param type RM Event type
- * @param diagnostic Diagnostic message for event
- * @param storeApp Application should be saved or not
- */
- public RMAppEvent(ApplicationId appId, RMAppEventType type, String diagnostic,
- boolean storeApp) {
- this(appId, type, diagnostic);
- this.storeAppInfo = storeApp;
}
public ApplicationId getApplicationId() {
@@ -61,12 +45,4 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
return this.diagnosticMsg;
}
- /**
- * Store application to state store or not.
- *
- * @return boolean application should be saved to store.
- */
- public boolean doStoreAppInfo() {
- return storeAppInfo;
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
index aa5d6f0..04d2db5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
@@ -47,4 +47,5 @@ public enum RMAppEventType {
// Source: RMStateStore
APP_NEW_SAVED,
APP_UPDATE_SAVED,
+ APP_SAVE_FAILED,
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index bf8fa4f..fa2f20c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -243,6 +243,8 @@ public class RMAppImpl implements RMApp, Recoverable {
RMAppEventType.APP_REJECTED,
new FinalSavingTransition(new AppRejectedTransition(),
RMAppState.FAILED))
+ .addTransition(RMAppState.NEW_SAVING, RMAppState.FAILED,
+ RMAppEventType.APP_SAVE_FAILED, new AppRejectedTransition())
// Transitions from SUBMITTED state
.addTransition(RMAppState.SUBMITTED, RMAppState.SUBMITTED,
@@ -1307,10 +1309,8 @@ public class RMAppImpl implements RMApp, Recoverable {
@Override
public void transition(RMAppImpl app, RMAppEvent event) {
- if (event.doStoreAppInfo()) {
- app.rememberTargetTransitionsAndStoreState(event, transitionToDo,
- targetedFinalState, stateToBeStored);
- }
+ app.rememberTargetTransitionsAndStoreState(event, transitionToDo,
+ targetedFinalState, stateToBeStored);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index fcd8647..942e9e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -270,20 +270,22 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
- return dispatcher.apprejectedEvnt;
+ return dispatcher.appsavefailedEvnt;
}
}, 100, 5000);
}
static class TestAppRejDispatcher extends TestDispatcher {
- private boolean apprejectedEvnt;
+ private boolean appsavefailedEvnt;
public void handle(Event event) {
- if (event instanceof RMAppEvent
- && event.getType().equals(RMAppEventType.APP_REJECTED)) {
- apprejectedEvnt = true;
+ if (event instanceof RMAppEvent && event.getType()
+ .equals(RMAppEventType.APP_SAVE_FAILED)) {
+ appsavefailedEvnt = true;
}
- };
+ }
+
+ ;
}
@Test (timeout = 60000)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index 5aa7af9..7c54b60 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -653,6 +653,23 @@ public class TestRMAppTransitions {
}
@Test (timeout = 30000)
+ public void testAppNewSavingSaveReject() throws IOException {
+ LOG.info("--- START: testAppNewSavingSaveReject ---");
+ RMApp application = testCreateAppNewSaving(null);
+ // NEW_SAVING => FAILED event RMAppEventType.APP_SAVE_FAILED
+ String rejectedText = "Test Application Rejected";
+ RMAppEvent event = new RMAppEvent(application.getApplicationId(),
+ RMAppEventType.APP_SAVE_FAILED, rejectedText);
+ application.handle(event);
+ rmDispatcher.await();
+ assertFailed(application, rejectedText);
+ verify(store, times(0)).updateApplicationState(
+ any(ApplicationStateData.class));
+ verifyApplicationFinished(RMAppState.FAILED);
+ assertTimesAtFinish(application);
+ }
+
+ @Test (timeout = 30000)
public void testAppSubmittedRejected() throws IOException {
LOG.info("--- START: testAppSubmittedRejected ---");
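In effect, a failed write to the state store now raises its own event (APP_SAVE_FAILED) instead of reusing APP_REJECTED with a store-skipping flag. A toy, self-contained sketch of the idea (hypothetical demo class, not YARN code): the save-failure event maps straight to the terminal state, while ordinary rejection still routes through the final-state save:

    import java.util.EnumMap;
    import java.util.Map;

    public class SaveFailedDemo {
      enum State { NEW_SAVING, FINAL_SAVING, FAILED }
      enum Event { APP_REJECTED, APP_SAVE_FAILED }

      public static void main(String[] args) {
        Map<Event, State> fromNewSaving = new EnumMap<>(Event.class);
        // APP_REJECTED persists the final state first (FinalSavingTransition)...
        fromNewSaving.put(Event.APP_REJECTED, State.FINAL_SAVING);
        // ...but APP_SAVE_FAILED must not touch the failing store again,
        // so it maps directly to FAILED.
        fromNewSaving.put(Event.APP_SAVE_FAILED, State.FAILED);
        System.out.println("NEW_SAVING + APP_SAVE_FAILED -> "
            + fromNewSaving.get(Event.APP_SAVE_FAILED));
      }
    }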
[42/50] [abbrv] hadoop git commit: HADOOP-14669.
GenericTestUtils.waitFor should use monotonic time. Contributed by Daniel
Templeton
Posted by xy...@apache.org.
HADOOP-14669. GenericTestUtils.waitFor should use monotonic time. Contributed by Daniel Templeton
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df180259
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df180259
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df180259
Branch: refs/heads/HDFS-7240
Commit: df180259b0cc3660e199e85447c7193bee51751c
Parents: 2843c68
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Wed Jul 19 09:41:22 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Wed Jul 19 09:41:22 2017 -0500
----------------------------------------------------------------------
.../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df180259/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 38a0c6c..9291bb0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -356,10 +356,10 @@ public abstract class GenericTestUtils {
Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
ERROR_INVALID_ARGUMENT);
- long st = Time.now();
+ long st = Time.monotonicNow();
boolean result = check.get();
- while (!result && (Time.now() - st < waitForMillis)) {
+ while (!result && (Time.monotonicNow() - st < waitForMillis)) {
Thread.sleep(checkEveryMillis);
result = check.get();
}
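The two-line change closes a real hole: a wait loop keyed to wall-clock time can time out early, or spin far past its deadline, if the system clock is stepped (for example by NTP) mid-wait. A plain-JDK analogue of the patched loop, assuming only that Hadoop's Time.monotonicNow() is backed by System.nanoTime(), which never jumps with clock adjustments:

    import java.util.function.Supplier;

    public class WaitForDemo {
      // System.nanoTime() is monotonic: it never jumps when the wall clock does.
      static long monotonicNowMillis() {
        return System.nanoTime() / 1_000_000L;
      }

      // Minimal analogue of GenericTestUtils.waitFor after this patch.
      static void waitFor(Supplier<Boolean> check, long checkEveryMillis,
          long waitForMillis) throws InterruptedException {
        long st = monotonicNowMillis();              // was: Time.now()
        boolean result = check.get();
        while (!result && (monotonicNowMillis() - st < waitForMillis)) {
          Thread.sleep(checkEveryMillis);
          result = check.get();
        }
        if (!result) {
          throw new IllegalStateException("Timed out waiting for condition");
        }
      }

      public static void main(String[] args) throws InterruptedException {
        long deadline = monotonicNowMillis() + 200;
        waitFor(() -> monotonicNowMillis() >= deadline, 50, 1000);
        System.out.println("Condition met; deadline measured monotonically.");
      }
    }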
[35/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging
APIs over to slf4j in hadoop-common. Contributed by Wenxin He.
Posted by xy...@apache.org.
HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccaf0366
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccaf0366
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccaf0366
Branch: refs/heads/HDFS-7240
Commit: ccaf036662e22da14583942054898c99fa51dae5
Parents: 5b00792
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jul 18 13:32:37 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jul 18 13:32:37 2017 +0900
----------------------------------------------------------------------
.../org/apache/hadoop/conf/Configuration.java | 17 +++++++++--------
.../apache/hadoop/conf/ReconfigurableBase.java | 7 ++++---
.../hadoop/conf/ReconfigurationServlet.java | 8 ++++----
.../hadoop/crypto/JceAesCtrCryptoCodec.java | 8 ++++----
.../hadoop/crypto/OpensslAesCtrCryptoCodec.java | 8 ++++----
.../org/apache/hadoop/crypto/OpensslCipher.java | 8 ++++----
.../crypto/random/OpensslSecureRandom.java | 8 ++++----
.../hadoop/crypto/random/OsSecureRandom.java | 9 +++++----
.../apache/hadoop/fs/AbstractFileSystem.java | 6 +++---
.../java/org/apache/hadoop/fs/ChecksumFs.java | 8 ++++----
.../hadoop/fs/DelegationTokenRenewer.java | 10 +++++-----
.../org/apache/hadoop/fs/FSInputChecker.java | 9 +++++----
.../java/org/apache/hadoop/fs/FileContext.java | 10 +++++-----
.../java/org/apache/hadoop/fs/FileUtil.java | 10 +++++-----
.../main/java/org/apache/hadoop/fs/FsShell.java | 6 +++---
.../apache/hadoop/fs/FsShellPermissions.java | 4 ++--
.../main/java/org/apache/hadoop/fs/Globber.java | 7 ++++---
.../org/apache/hadoop/fs/HarFileSystem.java | 11 ++++++-----
.../org/apache/hadoop/fs/LocalDirAllocator.java | 9 +++++----
.../main/java/org/apache/hadoop/fs/Trash.java | 7 ++++---
.../apache/hadoop/fs/TrashPolicyDefault.java | 8 ++++----
.../org/apache/hadoop/fs/ftp/FTPFileSystem.java | 8 ++++----
.../hadoop/fs/permission/FsPermission.java | 6 +++---
.../hadoop/fs/sftp/SFTPConnectionPool.java | 7 ++++---
.../apache/hadoop/fs/sftp/SFTPFileSystem.java | 7 ++++---
.../org/apache/hadoop/fs/shell/Command.java | 6 +++---
.../apache/hadoop/ha/ActiveStandbyElector.java | 15 ++++++++-------
.../apache/hadoop/ha/FailoverController.java | 10 +++++-----
.../main/java/org/apache/hadoop/ha/HAAdmin.java | 8 ++++----
.../org/apache/hadoop/ha/HealthMonitor.java | 8 ++++----
.../java/org/apache/hadoop/ha/NodeFencer.java | 6 +++---
.../org/apache/hadoop/ha/SshFenceByTcpPort.java | 11 +++--------
.../apache/hadoop/ha/ZKFailoverController.java | 20 ++++++++++----------
...HAServiceProtocolServerSideTranslatorPB.java | 6 +++---
.../org/apache/hadoop/http/HttpServer2.java | 6 +++---
.../hadoop/http/lib/StaticUserWebFilter.java | 7 ++++---
.../java/org/apache/hadoop/io/BloomMapFile.java | 6 +++---
.../apache/hadoop/io/FastByteComparisons.java | 7 +++----
.../main/java/org/apache/hadoop/io/IOUtils.java | 5 ++---
.../main/java/org/apache/hadoop/io/MapFile.java | 8 ++++----
.../org/apache/hadoop/io/ReadaheadPool.java | 6 +++---
.../java/org/apache/hadoop/io/SequenceFile.java | 7 ++++---
.../main/java/org/apache/hadoop/io/UTF8.java | 5 +++--
.../apache/hadoop/io/compress/CodecPool.java | 6 +++---
.../io/compress/CompressionCodecFactory.java | 8 ++++----
.../apache/hadoop/io/compress/DefaultCodec.java | 6 +++---
.../io/compress/bzip2/Bzip2Compressor.java | 8 ++++----
.../io/compress/bzip2/Bzip2Decompressor.java | 8 ++++----
.../hadoop/io/compress/bzip2/Bzip2Factory.java | 6 +++---
.../hadoop/io/compress/lz4/Lz4Compressor.java | 8 ++++----
.../hadoop/io/compress/lz4/Lz4Decompressor.java | 8 ++++----
.../io/compress/snappy/SnappyCompressor.java | 8 ++++----
.../io/compress/snappy/SnappyDecompressor.java | 8 ++++----
.../io/compress/zlib/BuiltInZlibDeflater.java | 8 ++++----
.../hadoop/io/compress/zlib/ZlibCompressor.java | 8 ++++----
.../hadoop/io/compress/zlib/ZlibFactory.java | 8 ++++----
.../apache/hadoop/io/erasurecode/CodecUtil.java | 6 +++---
.../io/erasurecode/ErasureCodeNative.java | 8 ++++----
.../org/apache/hadoop/io/file/tfile/BCFile.java | 6 +++---
.../hadoop/io/file/tfile/Compression.java | 6 +++---
.../org/apache/hadoop/io/file/tfile/TFile.java | 8 ++++----
.../hadoop/io/file/tfile/TFileDumper.java | 8 ++++----
.../org/apache/hadoop/io/nativeio/NativeIO.java | 16 ++++++++--------
.../nativeio/SharedFileDescriptorFactory.java | 7 ++++---
.../apache/hadoop/io/retry/RetryPolicies.java | 6 +++---
.../org/apache/hadoop/io/retry/RetryUtils.java | 6 +++---
.../io/serializer/SerializationFactory.java | 8 ++++----
.../org/apache/hadoop/ipc/CallQueueManager.java | 7 ++++---
.../main/java/org/apache/hadoop/ipc/Client.java | 6 +++---
.../org/apache/hadoop/ipc/FairCallQueue.java | 6 +++---
.../apache/hadoop/ipc/ProtobufRpcEngine.java | 7 ++++---
.../main/java/org/apache/hadoop/ipc/RPC.java | 6 +++---
.../org/apache/hadoop/ipc/RefreshRegistry.java | 7 ++++---
.../java/org/apache/hadoop/ipc/RetryCache.java | 6 +++---
.../main/java/org/apache/hadoop/ipc/Server.java | 16 ++++++++--------
.../ipc/WeightedRoundRobinMultiplexer.java | 8 ++++----
.../apache/hadoop/ipc/WritableRpcEngine.java | 6 +++---
.../hadoop/ipc/metrics/RetryCacheMetrics.java | 6 +++---
.../hadoop/ipc/metrics/RpcDetailedMetrics.java | 8 ++++----
.../apache/hadoop/ipc/metrics/RpcMetrics.java | 6 +++---
.../org/apache/hadoop/jmx/JMXJsonServlet.java | 7 ++++---
.../hadoop/metrics2/impl/MBeanInfoBuilder.java | 2 +-
.../hadoop/metrics2/impl/MetricsConfig.java | 8 ++++----
.../metrics2/impl/MetricsSinkAdapter.java | 9 +++++----
.../metrics2/impl/MetricsSourceAdapter.java | 7 ++++---
.../hadoop/metrics2/impl/MetricsSystemImpl.java | 6 +++---
.../hadoop/metrics2/lib/MethodMetric.java | 7 ++++---
.../metrics2/lib/MetricsSourceBuilder.java | 7 ++++---
.../metrics2/lib/MutableMetricsFactory.java | 7 ++++---
.../hadoop/metrics2/lib/MutableRates.java | 7 +++----
.../lib/MutableRatesWithAggregation.java | 7 ++++---
.../hadoop/metrics2/sink/GraphiteSink.java | 7 ++++---
.../sink/ganglia/AbstractGangliaSink.java | 10 +++++-----
.../metrics2/sink/ganglia/GangliaSink30.java | 6 +++---
.../metrics2/sink/ganglia/GangliaSink31.java | 7 ++++---
.../org/apache/hadoop/metrics2/util/MBeans.java | 6 +++---
.../hadoop/metrics2/util/MetricsCache.java | 6 +++---
.../main/java/org/apache/hadoop/net/DNS.java | 6 +++---
.../java/org/apache/hadoop/net/NetUtils.java | 6 +++---
.../apache/hadoop/net/ScriptBasedMapping.java | 8 ++++----
.../apache/hadoop/net/SocketIOWithTimeout.java | 6 +++---
.../org/apache/hadoop/net/TableMapping.java | 6 +++---
.../apache/hadoop/net/unix/DomainSocket.java | 10 +++++-----
.../hadoop/net/unix/DomainSocketWatcher.java | 12 ++++++------
.../AuthenticationWithProxyUserFilter.java | 8 ++++----
.../hadoop/security/CompositeGroupsMapping.java | 7 ++++---
.../org/apache/hadoop/security/Credentials.java | 10 +++++-----
.../java/org/apache/hadoop/security/Groups.java | 7 +++----
.../HttpCrossOriginFilterInitializer.java | 8 ++++----
.../security/JniBasedUnixGroupsMapping.java | 8 ++++----
.../JniBasedUnixGroupsMappingWithFallback.java | 8 ++++----
.../JniBasedUnixGroupsNetgroupMapping.java | 6 +++---
...edUnixGroupsNetgroupMappingWithFallback.java | 8 ++++----
.../hadoop/security/LdapGroupsMapping.java | 7 ++++---
.../apache/hadoop/security/ProviderUtils.java | 7 ++++---
.../apache/hadoop/security/SaslInputStream.java | 7 ++++---
.../apache/hadoop/security/SaslRpcClient.java | 7 ++++---
.../apache/hadoop/security/SaslRpcServer.java | 6 +++---
.../apache/hadoop/security/SecurityUtil.java | 9 ++++-----
.../hadoop/security/ShellBasedIdMapping.java | 8 ++++----
.../ShellBasedUnixGroupsNetgroupMapping.java | 8 ++++----
.../hadoop/security/WhitelistBasedResolver.java | 7 ++++---
.../alias/AbstractJavaKeyStoreProvider.java | 6 +++---
.../authorize/ServiceAuthorizationManager.java | 9 +++++----
.../hadoop/security/http/CrossOriginFilter.java | 7 ++++---
.../security/ssl/FileBasedKeyStoresFactory.java | 8 ++++----
.../security/ssl/ReloadingX509TrustManager.java | 7 ++++---
.../hadoop/security/token/DtFileOperations.java | 7 ++++---
.../hadoop/security/token/DtUtilShell.java | 6 +++---
.../org/apache/hadoop/security/token/Token.java | 6 +++---
.../AbstractDelegationTokenSecretManager.java | 8 ++++----
.../apache/hadoop/service/AbstractService.java | 9 +++++----
.../apache/hadoop/service/CompositeService.java | 7 ++++---
.../service/LoggingStateChangeListener.java | 11 ++++++-----
.../hadoop/service/ServiceOperations.java | 5 +++--
.../tracing/TracerConfigurationManager.java | 8 ++++----
.../hadoop/util/ApplicationClassLoader.java | 10 +++++-----
.../apache/hadoop/util/AsyncDiskService.java | 7 ++++---
.../apache/hadoop/util/CombinedIPWhiteList.java | 7 ++++---
.../org/apache/hadoop/util/FileBasedIPList.java | 11 ++++++-----
.../main/java/org/apache/hadoop/util/GSet.java | 6 +++---
.../hadoop/util/GenericOptionsParser.java | 7 ++++---
.../org/apache/hadoop/util/HostsFileReader.java | 9 +++++----
.../apache/hadoop/util/IntrusiveCollection.java | 7 ++++---
.../org/apache/hadoop/util/JvmPauseMonitor.java | 6 +++---
.../org/apache/hadoop/util/MachineList.java | 6 +++---
.../apache/hadoop/util/NativeCodeLoader.java | 8 ++++----
.../hadoop/util/NodeHealthScriptRunner.java | 7 ++++---
.../java/org/apache/hadoop/util/Progress.java | 6 +++---
.../apache/hadoop/util/ShutdownHookManager.java | 7 ++++---
.../hadoop/util/ShutdownThreadsHelper.java | 7 ++++---
.../org/apache/hadoop/util/SysInfoLinux.java | 8 ++++----
.../org/apache/hadoop/util/SysInfoWindows.java | 7 ++++---
.../java/org/apache/hadoop/util/ThreadUtil.java | 7 +++----
.../org/apache/hadoop/util/VersionInfo.java | 8 ++++----
.../hadoop/util/concurrent/AsyncGetFuture.java | 7 ++++---
.../hadoop/util/concurrent/ExecutorHelper.java | 8 ++++----
.../HadoopScheduledThreadPoolExecutor.java | 8 ++++----
.../concurrent/HadoopThreadPoolExecutor.java | 8 ++++----
.../hadoop/crypto/CryptoStreamsTestBase.java | 6 +++---
.../apache/hadoop/crypto/TestCryptoCodec.java | 7 ++++---
.../apache/hadoop/fs/FCStatisticsBaseTest.java | 7 ++++---
.../org/apache/hadoop/fs/TestFileContext.java | 7 ++++---
.../org/apache/hadoop/fs/TestFileStatus.java | 8 ++++----
.../java/org/apache/hadoop/fs/TestFileUtil.java | 6 +++---
.../org/apache/hadoop/fs/TestFsShellCopy.java | 6 +++---
.../apache/hadoop/fs/TestFsShellReturnCode.java | 8 ++++----
.../org/apache/hadoop/fs/TestFsShellTouch.java | 6 +++---
.../org/apache/hadoop/fs/TestHarFileSystem.java | 7 ++++---
.../fs/contract/AbstractBondedFSContract.java | 8 ++++----
.../hadoop/fs/loadGenerator/LoadGenerator.java | 10 +++++-----
.../hadoop/ha/ActiveStandbyElectorTestUtil.java | 6 +++---
.../org/apache/hadoop/ha/DummyHAService.java | 7 ++++---
.../org/apache/hadoop/ha/MiniZKFCCluster.java | 7 ++++---
.../java/org/apache/hadoop/ha/TestHAAdmin.java | 6 +++---
.../org/apache/hadoop/ha/TestHealthMonitor.java | 6 +++---
.../apache/hadoop/http/TestGlobalFilter.java | 6 +++---
.../org/apache/hadoop/http/TestHttpServer.java | 6 +++---
.../apache/hadoop/http/TestHttpServerLogs.java | 6 +++---
.../hadoop/http/TestHttpServerWebapps.java | 9 +++++----
.../hadoop/http/TestHttpServerWithSpengo.java | 7 ++++---
.../org/apache/hadoop/http/TestPathFilter.java | 6 +++---
.../apache/hadoop/http/TestSSLHttpServer.java | 7 ++++---
.../apache/hadoop/http/TestServletFilter.java | 6 +++---
.../hadoop/http/resource/JerseyResource.java | 6 +++---
.../org/apache/hadoop/io/TestArrayFile.java | 7 +++++--
.../hadoop/io/TestDefaultStringifier.java | 8 +++++---
.../org/apache/hadoop/io/TestSequenceFile.java | 7 ++++---
.../java/org/apache/hadoop/io/TestSetFile.java | 7 +++----
.../org/apache/hadoop/io/TestWritableUtils.java | 7 ++++---
.../apache/hadoop/io/compress/TestCodec.java | 8 ++++----
.../io/compress/TestCompressionStreamReuse.java | 9 +++++----
.../apache/hadoop/io/nativeio/TestNativeIO.java | 10 +++++-----
.../TestSharedFileDescriptorFactory.java | 7 ++++---
.../org/apache/hadoop/ipc/TestAsyncIPC.java | 10 +++++-----
.../java/org/apache/hadoop/ipc/TestIPC.java | 17 ++++++++---------
.../hadoop/ipc/TestIPCServerResponder.java | 10 +++++-----
.../ipc/TestProtoBufRpcServerHandoff.java | 12 ++++++------
.../java/org/apache/hadoop/ipc/TestRPC.java | 6 +++---
.../apache/hadoop/ipc/TestRPCCompatibility.java | 8 ++++----
.../hadoop/ipc/TestRPCServerShutdown.java | 7 ++++---
.../apache/hadoop/ipc/TestRpcServerHandoff.java | 8 ++++----
.../java/org/apache/hadoop/ipc/TestSaslRPC.java | 7 +++----
.../java/org/apache/hadoop/ipc/TestServer.java | 4 ++--
.../ipc/TestWeightedRoundRobinMultiplexer.java | 7 ++++---
.../metrics2/impl/TestGangliaMetrics.java | 7 ++++---
.../hadoop/metrics2/impl/TestMetricsConfig.java | 7 ++++---
.../metrics2/impl/TestMetricsSystemImpl.java | 13 +++++++------
.../hadoop/metrics2/impl/TestSinkQueue.java | 10 ++++++----
.../hadoop/metrics2/lib/TestMutableMetrics.java | 7 ++++---
.../hadoop/metrics2/util/TestMetricsCache.java | 8 +++++---
.../org/apache/hadoop/net/ServerSocketUtil.java | 9 +++++----
.../java/org/apache/hadoop/net/TestDNS.java | 6 +++---
.../org/apache/hadoop/net/TestNetUtils.java | 6 +++---
.../hadoop/net/TestSocketIOWithTimeout.java | 8 +++++---
.../apache/hadoop/net/TestStaticMapping.java | 7 ++++---
.../hadoop/net/unix/TestDomainSocket.java | 2 +-
.../net/unix/TestDomainSocketWatcher.java | 15 ++++++++-------
.../security/TestCompositeGroupMapping.java | 7 ++++---
.../hadoop/security/TestDoAsEffectiveUser.java | 8 ++++----
.../hadoop/security/TestGroupFallback.java | 7 ++++---
.../hadoop/security/TestGroupsCaching.java | 8 ++++----
.../TestShellBasedUnixGroupsMapping.java | 8 ++++----
.../alias/TestCredentialProviderFactory.java | 7 ++++---
.../authorize/TestAccessControlList.java | 8 ++++----
.../security/authorize/TestProxyUsers.java | 8 ++++----
.../token/delegation/TestDelegationToken.java | 7 ++++---
.../hadoop/service/TestCompositeService.java | 7 ++++---
.../hadoop/service/TestServiceLifecycle.java | 7 ++++---
.../org/apache/hadoop/test/MetricsAsserts.java | 6 +++---
.../hadoop/test/MultithreadedTestUtil.java | 8 ++++----
.../hadoop/test/TestGenericTestUtils.java | 5 +----
.../org/apache/hadoop/test/TestJUnitSetup.java | 7 ++++---
.../hadoop/util/TestAsyncDiskService.java | 7 ++++---
.../org/apache/hadoop/util/TestClasspath.java | 9 +++++----
.../org/apache/hadoop/util/TestFindClass.java | 7 ++++---
.../hadoop/util/TestIdentityHashStore.java | 7 ++++---
.../apache/hadoop/util/TestLightWeightGSet.java | 7 ++++---
.../util/TestLightWeightResizableGSet.java | 7 ++++---
.../hadoop/util/TestNativeCodeLoader.java | 6 +++---
.../apache/hadoop/util/TestSignalLogger.java | 11 ++++++-----
.../org/apache/hadoop/util/TestWinUtils.java | 6 +++---
.../org/apache/hadoop/mount/MountdBase.java | 12 ++++++------
.../java/org/apache/hadoop/nfs/NfsExports.java | 6 +++---
.../org/apache/hadoop/nfs/nfs3/FileHandle.java | 6 +++---
.../org/apache/hadoop/nfs/nfs3/Nfs3Base.java | 10 +++++-----
.../hadoop/oncrpc/RegistrationClient.java | 7 ++++---
.../java/org/apache/hadoop/oncrpc/RpcCall.java | 10 +++++-----
.../org/apache/hadoop/oncrpc/RpcProgram.java | 6 +++---
.../java/org/apache/hadoop/oncrpc/RpcUtil.java | 11 ++++++-----
.../hadoop/oncrpc/SimpleTcpClientHandler.java | 7 ++++---
.../apache/hadoop/oncrpc/SimpleTcpServer.java | 7 ++++---
.../apache/hadoop/oncrpc/SimpleUdpServer.java | 7 ++++---
.../hadoop/oncrpc/security/Credentials.java | 6 +++---
.../hadoop/oncrpc/security/SecurityHandler.java | 7 ++++---
.../java/org/apache/hadoop/portmap/Portmap.java | 8 ++++----
.../hadoop/portmap/RpcProgramPortmap.java | 7 ++++---
257 files changed, 1021 insertions(+), 932 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 550aee7..de52fbb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -79,8 +79,6 @@ import javax.xml.transform.stream.StreamResult;
import com.google.common.base.Charsets;
import org.apache.commons.collections.map.UnmodifiableMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -98,6 +96,8 @@ import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.stax2.XMLInputFactory2;
import org.codehaus.stax2.XMLStreamReader2;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
@@ -192,11 +192,12 @@ import com.google.common.base.Strings;
@InterfaceStability.Stable
public class Configuration implements Iterable<Map.Entry<String,String>>,
Writable {
- private static final Log LOG =
- LogFactory.getLog(Configuration.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(Configuration.class);
- private static final Log LOG_DEPRECATION =
- LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation");
+ private static final Logger LOG_DEPRECATION =
+ LoggerFactory.getLogger(
+ "org.apache.hadoop.conf.Configuration.deprecation");
private boolean quietmode = true;
@@ -2885,10 +2886,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
}
return null;
} catch (IOException e) {
- LOG.fatal("error parsing conf " + name, e);
+ LOG.error("error parsing conf " + name, e);
throw new RuntimeException(e);
} catch (XMLStreamException e) {
- LOG.fatal("error parsing conf " + name, e);
+ LOG.error("error parsing conf " + name, e);
throw new RuntimeException(e);
}
}
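The fatal-to-error change in this hunk is forced by the API: slf4j's Logger exposes only trace/debug/info/warn/error and has no fatal(). Downgrading to error() while keeping the rethrow preserves the fail-fast behaviour. A self-contained sketch of the substitution, assuming nothing beyond slf4j itself:

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FatalToError {
  private static final Logger LOG =
      LoggerFactory.getLogger(FatalToError.class);

  static void parse(String name) {
    try {
      throw new IOException("simulated parse failure");
    } catch (IOException e) {
      // slf4j has no fatal(); error() is its highest severity, and the
      // rethrow below still terminates the caller, so nothing is lost.
      LOG.error("error parsing conf " + name, e);
      throw new RuntimeException(e);
    }
  }
}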
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
index bdd006d..146c6d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
@@ -22,9 +22,10 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
-import org.apache.commons.logging.*;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
@@ -41,8 +42,8 @@ import java.util.Map;
public abstract class ReconfigurableBase
extends Configured implements Reconfigurable {
- private static final Log LOG =
- LogFactory.getLog(ReconfigurableBase.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ReconfigurableBase.class);
// Use for testing purpose.
private ReconfigurationUtil reconfigurationUtil = new ReconfigurationUtil();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index bb221ee..5a616f7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.conf;
-import org.apache.commons.logging.*;
-
import org.apache.commons.lang.StringEscapeUtils;
import java.util.Collection;
@@ -33,6 +31,8 @@ import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A servlet for changing a node's configuration.
@@ -45,8 +45,8 @@ public class ReconfigurationServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
- private static final Log LOG =
- LogFactory.getLog(ReconfigurationServlet.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ReconfigurationServlet.class);
// the prefix used to find the attribute holding the reconfigurable
// for a given request
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
index 61ee743..de0e5dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
@@ -26,12 +26,12 @@ import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;
@@ -42,8 +42,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
*/
@InterfaceAudience.Private
public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
- private static final Log LOG =
- LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(JceAesCtrCryptoCodec.class.getName());
private Configuration conf;
private String provider;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
index d08e588..8d01f42 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
@@ -26,22 +26,22 @@ import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Preconditions;
import org.apache.hadoop.crypto.random.OsSecureRandom;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Implement the AES-CTR crypto codec using JNI into OpenSSL.
*/
@InterfaceAudience.Private
public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
- private static final Log LOG =
- LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OpensslAesCtrCryptoCodec.class.getName());
private Configuration conf;
private Random random;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
index 6a03bb6..133a9f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -26,13 +26,13 @@ import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.ShortBufferException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* OpenSSL cipher using JNI.
@@ -41,8 +41,8 @@ import org.apache.hadoop.util.PerformanceAdvisory;
*/
@InterfaceAudience.Private
public final class OpensslCipher {
- private static final Log LOG =
- LogFactory.getLog(OpensslCipher.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OpensslCipher.class.getName());
public static final int ENCRYPT_MODE = 1;
public static final int DECRYPT_MODE = 0;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
index 6c53a0a..1219bf9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
@@ -19,13 +19,13 @@ package org.apache.hadoop.crypto.random;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* OpenSSL secure random using JNI.
@@ -44,8 +44,8 @@ import org.apache.hadoop.util.PerformanceAdvisory;
@InterfaceAudience.Private
public class OpensslSecureRandom extends Random {
private static final long serialVersionUID = -7828193502768789584L;
- private static final Log LOG =
- LogFactory.getLog(OpensslSecureRandom.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OpensslSecureRandom.class.getName());
/** If native SecureRandom unavailable, use java SecureRandom */
private java.security.SecureRandom fallback = null;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
index 9428b98..6671591 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
@@ -23,12 +23,12 @@ import java.io.FileInputStream;
import java.io.IOException;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;
@@ -39,7 +39,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
*/
@InterfaceAudience.Private
public class OsSecureRandom extends Random implements Closeable, Configurable {
- public static final Log LOG = LogFactory.getLog(OsSecureRandom.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(OsSecureRandom.class);
private static final long serialVersionUID = 6391500337172057900L;
@@ -112,7 +113,7 @@ public class OsSecureRandom extends Random implements Closeable, Configurable {
@Override
synchronized public void close() {
if (stream != null) {
- IOUtils.cleanup(LOG, stream);
+ IOUtils.cleanupWithLogger(LOG, stream);
stream = null;
}
}
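The close() hunk above switches IOUtils.cleanup, whose first parameter is a commons-logging Log, for IOUtils.cleanupWithLogger, which accepts an org.slf4j.Logger; with the LOG field now an slf4j Logger, the old overload no longer applies. As a rough stand-in for what that helper does (the real implementation is Hadoop's org.apache.hadoop.io.IOUtils, so treat this as a sketch, not its source):

import java.io.Closeable;
import java.io.IOException;
import org.slf4j.Logger;

final class CleanupSketch {
  // Close each stream, logging rather than propagating any IOException,
  // so cleanup in a finally block can never mask the original error.
  static void cleanupWithLogger(Logger log, Closeable... closeables) {
    for (Closeable c : closeables) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException e) {
        if (log != null) {
          log.debug("Exception in closing {}", c, e);
        }
      }
    }
  }
}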
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index ef68437..9bea8f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -32,8 +32,6 @@ import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -52,6 +50,8 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class provides an interface for implementors of a Hadoop file system
@@ -66,7 +66,7 @@ import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class AbstractFileSystem {
- static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
+ static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class);
/** Recording statistics per a file system class. */
private static final Map<URI, Statistics>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 0a8cc73..75622ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -27,14 +27,14 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Abstract Checksumed Fs.
@@ -110,8 +110,8 @@ public abstract class ChecksumFs extends FilterFs {
* It verifies that data matches checksums.
*******************************************************/
private static class ChecksumFSInputChecker extends FSInputChecker {
- public static final Log LOG
- = LogFactory.getLog(FSInputChecker.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(FSInputChecker.class);
private static final int HEADER_LENGTH = 8;
private ChecksumFs fs;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
index 3542a9b..09c3a8a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
@@ -26,12 +26,12 @@ import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A daemon thread that waits for the next file system to renew.
@@ -39,8 +39,8 @@ import org.apache.hadoop.util.Time;
@InterfaceAudience.Private
public class DelegationTokenRenewer
extends Thread {
- private static final Log LOG = LogFactory
- .getLog(DelegationTokenRenewer.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(DelegationTokenRenewer.class);
/** The renewable interface used by the renewer. */
public interface Renewable {
@@ -243,7 +243,7 @@ public class DelegationTokenRenewer
LOG.error("Interrupted while canceling token for " + fs.getUri()
+ "filesystem");
if (LOG.isDebugEnabled()) {
- LOG.debug(ie.getStackTrace());
+ LOG.debug("Exception in removeRenewAction: ", ie);
}
}
}
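The debug-logging fix in this hunk is one of the places where the old code was already wrong: getStackTrace() returns a StackTraceElement[], which commons-logging's debug(Object) rendered as an array reference rather than a stack trace. slf4j's debug() takes a String message, and passing the Throwable as the final argument logs the full trace. A small sketch of the difference:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(ThrowableLogging.class);

  static void demo(InterruptedException ie) {
    // Old style: LOG.debug(ie.getStackTrace()) compiled against
    // commons-logging but printed something like
    // [Ljava.lang.StackTraceElement;@1b6d3586; it does not compile
    // against slf4j at all.

    // New style: pass the Throwable itself and slf4j appends the trace.
    LOG.debug("Exception in removeRenewAction: ", ie);
  }
}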
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
index 9b66c95..4f06e26 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
@@ -22,11 +22,12 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.zip.Checksum;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
@@ -37,8 +38,8 @@ import java.nio.IntBuffer;
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
abstract public class FSInputChecker extends FSInputStream {
- public static final Log LOG
- = LogFactory.getLog(FSInputChecker.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(FSInputChecker.class);
/** The file name from which data is read. */
protected Path file;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 160a63d..fef968b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -35,8 +35,6 @@ import java.util.Stack;
import java.util.TreeSet;
import java.util.Map.Entry;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -63,6 +61,8 @@ import org.apache.hadoop.util.ShutdownHookManager;
import com.google.common.base.Preconditions;
import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* The FileContext class provides an interface for users of the Hadoop
@@ -169,7 +169,7 @@ import org.apache.htrace.core.Tracer;
@InterfaceStability.Stable
public class FileContext {
- public static final Log LOG = LogFactory.getLog(FileContext.class);
+ public static final Logger LOG = LoggerFactory.getLogger(FileContext.class);
/**
* Default permission for directory and symlink
* In previous versions, this default permission was also used to
@@ -332,7 +332,7 @@ public class FileContext {
}
});
} catch (InterruptedException ex) {
- LOG.error(ex);
+ LOG.error(ex.toString());
throw new IOException("Failed to get the AbstractFileSystem for path: "
+ uri, ex);
}
@@ -446,7 +446,7 @@ public class FileContext {
} catch (UnsupportedFileSystemException ex) {
throw ex;
} catch (IOException ex) {
- LOG.error(ex);
+ LOG.error(ex.toString());
throw new RuntimeException(ex);
}
return getFileContext(defaultAfs, aConf);
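The two LOG.error(ex) call sites above change because commons-logging accepted a bare Object (the exception) while slf4j requires a String message. ex.toString() is the behaviour-preserving translation, since the old calls typically printed only the exception's toString() with no stack trace; LOG.error("message", ex) would be the stronger form where the trace is wanted. A sketch of both options:

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class OneArgErrorCalls {
  private static final Logger LOG =
      LoggerFactory.getLogger(OneArgErrorCalls.class);

  static void demo(IOException ex) {
    // Behaviour-preserving: message only, no stack trace (matches the
    // usual commons-logging output for LOG.error(ex)).
    LOG.error(ex.toString());

    // Stronger alternative: message plus full stack trace.
    LOG.error("failed to get the AbstractFileSystem", ex);
  }
}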
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index b656a87..eb8a5c3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -45,8 +45,6 @@ import java.util.zip.ZipFile;
import org.apache.commons.collections.map.CaseInsensitiveMap;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -57,6 +55,8 @@ import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A collection of file-processing util methods
@@ -65,7 +65,7 @@ import org.apache.hadoop.util.StringUtils;
@InterfaceStability.Evolving
public class FileUtil {
- private static final Log LOG = LogFactory.getLog(FileUtil.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class);
/* The error code is defined in winutils to indicate insufficient
* privilege to create symbolic links. This value needs to be kept in
@@ -697,7 +697,7 @@ public class FileUtil {
entry = tis.getNextTarEntry();
}
} finally {
- IOUtils.cleanup(LOG, tis, inputStream);
+ IOUtils.cleanupWithLogger(LOG, tis, inputStream);
}
}
@@ -1287,7 +1287,7 @@ public class FileUtil {
bos = new BufferedOutputStream(fos);
jos = new JarOutputStream(bos, jarManifest);
} finally {
- IOUtils.cleanup(LOG, jos, bos, fos);
+ IOUtils.cleanupWithLogger(LOG, jos, bos, fos);
}
String[] jarCp = {classPathJar.getCanonicalPath(),
unexpandedWildcardClasspath.toString()};
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 59d15c2..721f4df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -24,8 +24,6 @@ import java.util.Arrays;
import java.util.LinkedList;
import org.apache.commons.lang.WordUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -39,12 +37,14 @@ import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** Provide command line access to a FileSystem. */
@InterfaceAudience.Private
public class FsShell extends Configured implements Tool {
- static final Log LOG = LogFactory.getLog(FsShell.class);
+ static final Logger LOG = LoggerFactory.getLogger(FsShell.class);
private static final int MAX_LINE_WIDTH = 80;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
index 0a82929..76e379c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
@@ -22,7 +22,6 @@ import java.util.LinkedList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.ChmodParser;
@@ -32,6 +31,7 @@ import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
/**
* This class is the home for file permissions related commands.
@@ -41,7 +41,7 @@ import org.apache.hadoop.util.Shell;
@InterfaceStability.Unstable
public class FsShellPermissions extends FsCommand {
- static Log LOG = FsShell.LOG;
+ static final Logger LOG = FsShell.LOG;
/**
* Register the permission related commands with the factory
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
index 7c69167..ca3db1d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
@@ -23,18 +23,19 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Globber {
- public static final Log LOG = LogFactory.getLog(Globber.class.getName());
+ public static final Logger LOG =
+ LoggerFactory.getLogger(Globber.class.getName());
private final FileSystem fs;
private final FileContext fc;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index 6a1e8bd..4c2fd1b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -17,14 +17,14 @@
*/
package org.apache.hadoop.fs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.FileNotFoundException;
@@ -50,7 +50,8 @@ import java.util.*;
public class HarFileSystem extends FileSystem {
- private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HarFileSystem.class);
public static final String METADATA_CACHE_ENTRIES_KEY = "fs.har.metadatacache.entries";
public static final int METADATA_CACHE_ENTRIES_DEFAULT = 10;
@@ -1173,7 +1174,7 @@ public class HarFileSystem extends FileSystem {
LOG.warn("Encountered exception ", ioe);
throw ioe;
} finally {
- IOUtils.cleanup(LOG, lin, in);
+ IOUtils.cleanupWithLogger(LOG, lin, in);
}
FSDataInputStream aIn = fs.open(archiveIndexPath);
@@ -1198,7 +1199,7 @@ public class HarFileSystem extends FileSystem {
}
}
} finally {
- IOUtils.cleanup(LOG, aIn);
+ IOUtils.cleanupWithLogger(LOG, aIn);
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 1ed01ea..c1e9d21 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -23,14 +23,15 @@ import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
-import org.apache.commons.logging.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** An implementation of a round-robin scheme for disk allocation for creating
* files. The way it works is that it keeps track of what disk was last
@@ -245,8 +246,8 @@ public class LocalDirAllocator {
private static class AllocatorPerContext {
- private final Log LOG =
- LogFactory.getLog(AllocatorPerContext.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AllocatorPerContext.class);
private Random dirIndexRandomizer = new Random();
private String contextCfgItemName;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index b771812..49cd600 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -19,11 +19,12 @@ package org.apache.hadoop.fs;
import java.io.IOException;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Provides a trash facility which supports pluggable Trash policies.
@@ -34,8 +35,8 @@ import org.apache.hadoop.conf.Configured;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Trash extends Configured {
- private static final org.apache.commons.logging.Log LOG =
- LogFactory.getLog(Trash.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(Trash.class);
private TrashPolicy trashPolicy; // configured trash policy instance
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index c65e16a..265e967 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -30,8 +30,6 @@ import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Date;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -41,6 +39,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** Provides a <i>trash</i> feature. Files are moved to a user's trash
* directory, a subdirectory of their home directory named ".Trash". Files are
@@ -54,8 +54,8 @@ import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class TrashPolicyDefault extends TrashPolicy {
- private static final Log LOG =
- LogFactory.getLog(TrashPolicyDefault.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TrashPolicyDefault.class);
private static final Path CURRENT = new Path("Current");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 5f4c8552..4c1236b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -25,8 +25,6 @@ import java.net.URI;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.ftp.FTP;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPFile;
@@ -45,6 +43,8 @@ import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Progressable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <p>
@@ -56,8 +56,8 @@ import org.apache.hadoop.util.Progressable;
@InterfaceStability.Stable
public class FTPFileSystem extends FileSystem {
- public static final Log LOG = LogFactory
- .getLog(FTPFileSystem.class);
+ public static final Logger LOG = LoggerFactory
+ .getLogger(FTPFileSystem.class);
public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index ddb2724..73ab5f6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -24,8 +24,6 @@ import java.io.InvalidObjectException;
import java.io.ObjectInputValidation;
import java.io.Serializable;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -33,6 +31,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A class for file/directory permissions.
@@ -41,7 +41,7 @@ import org.apache.hadoop.io.WritableFactory;
@InterfaceStability.Stable
public class FsPermission implements Writable, Serializable,
ObjectInputValidation {
- private static final Log LOG = LogFactory.getLog(FsPermission.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FsPermission.class);
private static final long serialVersionUID = 0x2fe08564;
static final WritableFactory FACTORY = new WritableFactory() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
index c7fae7b..de86bab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
@@ -23,19 +23,20 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;
import com.jcraft.jsch.ChannelSftp;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** Concurrent/Multiple Connections. */
class SFTPConnectionPool {
- public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SFTPFileSystem.class);
// Maximum number of allowed live connections. This doesn't mean we cannot
// have more live connections. It means that when we have more
// live connections than this threshold, any unused connection will be
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index 6de69fa..421769d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -26,8 +26,6 @@ import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Vector;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,11 +39,14 @@ import com.jcraft.jsch.ChannelSftp;
import com.jcraft.jsch.ChannelSftp.LsEntry;
import com.jcraft.jsch.SftpATTRS;
import com.jcraft.jsch.SftpException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** SFTP FileSystem. */
public class SFTPFileSystem extends FileSystem {
- public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SFTPFileSystem.class);
private SFTPConnectionPool connectionPool;
private URI uri;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
index 4c5cbad..c292cf6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
@@ -27,8 +27,6 @@ import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,8 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An abstract class for the execution of a file system command
@@ -59,7 +59,7 @@ abstract public class Command extends Configured {
private int depth = 0;
protected ArrayList<Exception> exceptions = new ArrayList<Exception>();
- private static final Log LOG = LogFactory.getLog(Command.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Command.class);
/** allows stdout to be captured if necessary */
public PrintStream out = System.out;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index cf95a49..93fd2cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -26,8 +26,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -47,6 +45,8 @@ import org.apache.zookeeper.KeeperException.Code;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
*
@@ -141,7 +141,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
@VisibleForTesting
protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
- public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(ActiveStandbyElector.class);
private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
@@ -712,7 +713,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
}
private void fatalError(String errorMessage) {
- LOG.fatal(errorMessage);
+ LOG.error(errorMessage);
reset();
appClient.notifyFatalError(errorMessage);
}
@@ -824,10 +825,10 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
createConnection();
success = true;
} catch(IOException e) {
- LOG.warn(e);
+ LOG.warn(e.toString());
sleepFor(5000);
} catch(KeeperException e) {
- LOG.warn(e);
+ LOG.warn(e.toString());
sleepFor(5000);
}
++connectionRetryCount;
@@ -866,7 +867,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
try {
tempZk.close();
} catch(InterruptedException e) {
- LOG.warn(e);
+ LOG.warn(e.toString());
}
zkConnectionState = ConnectionState.TERMINATED;
wantToBeInElection = false;
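Note the LOG.warn(e) to LOG.warn(e.toString()) changes above: commons-logging's warn(Object) accepted a bare Throwable, while SLF4J's warn takes a String, so the patch stringifies the exception. That logs only the message, not the stack trace; SLF4J's usual idiom for keeping the trace is to pass the exception as the final argument. A small sketch contrasting the two (class and method names are illustrative only):

  import java.io.IOException;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  class RetryExample {
    private static final Logger LOG = LoggerFactory.getLogger(RetryExample.class);

    void connectWithRetry() {
      try {
        throw new IOException("connection refused"); // stand-in for a real failure
      } catch (IOException e) {
        // Matches the patch: message only, no stack trace.
        LOG.warn(e.toString());
        // Alternative SLF4J idiom that preserves the stack trace:
        LOG.warn("Failed to connect to ZooKeeper, will retry", e);
      }
    }
  }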
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index d952e29..3c05a25 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -19,9 +19,6 @@ package org.apache.hadoop.ha;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -32,6 +29,8 @@ import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ipc.RPC;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* The FailOverController is responsible for electing an active service
@@ -43,7 +42,8 @@ import com.google.common.base.Preconditions;
@InterfaceStability.Evolving
public class FailoverController {
- private static final Log LOG = LogFactory.getLog(FailoverController.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(FailoverController.class);
private final int gracefulFenceTimeout;
private final int rpcTimeoutToNewActive;
@@ -252,7 +252,7 @@ public class FailoverController {
} catch (FailoverFailedException ffe) {
msg += ". Failback to " + fromSvc +
" failed (" + ffe.getMessage() + ")";
- LOG.fatal(msg);
+ LOG.error(msg);
}
}
throw new FailoverFailedException(msg, cause);
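SLF4J defines only trace/debug/info/warn/error, so every LOG.fatal call in this patch is downgraded to LOG.error, as above. Where fatal-level filtering still matters, SLF4J Markers are the conventional substitute; a hedged sketch of that option (the "FATAL" marker and class name are illustrations, not something this patch introduces):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  import org.slf4j.Marker;
  import org.slf4j.MarkerFactory;

  class FatalLoggingExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(FatalLoggingExample.class);
    // Backends such as logback can filter or route on this marker.
    private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

    void fail(String msg) {
      LOG.error(FATAL, msg); // ERROR level, tagged FATAL for downstream filtering
    }
  }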
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 5eff14c..9b7d7ba 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -28,8 +28,6 @@ import org.apache.commons.cli.Options;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -43,6 +41,8 @@ import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A command-line tool for making calls in the HAServiceProtocol.
@@ -62,7 +62,7 @@ public abstract class HAAdmin extends Configured implements Tool {
* operation, which is why it is not documented in the usage below.
*/
private static final String FORCEMANUAL = "forcemanual";
- private static final Log LOG = LogFactory.getLog(HAAdmin.class);
+ private static final Logger LOG = LoggerFactory.getLogger(HAAdmin.class);
private int rpcTimeoutForChecks = -1;
@@ -449,7 +449,7 @@ public abstract class HAAdmin extends Configured implements Tool {
if (cmdLine.hasOption(FORCEMANUAL)) {
if (!confirmForceManual()) {
- LOG.fatal("Aborted");
+ LOG.error("Aborted");
return -1;
}
// Instruct the NNs to honor this request even if they're
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
index 24c149c..a93df75 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
@@ -23,8 +23,6 @@ import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
import org.apache.hadoop.ha.HAServiceProtocol;
@@ -35,6 +33,8 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.util.Daemon;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class is a daemon which runs in a loop, periodically heartbeating
@@ -47,7 +47,7 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public class HealthMonitor {
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
HealthMonitor.class);
private Daemon daemon;
@@ -283,7 +283,7 @@ public class HealthMonitor {
setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
- LOG.fatal("Health monitor failed", e);
+ LOG.error("Health monitor failed", e);
enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED);
}
});
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
index 1afd937..2247a34 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
@@ -22,8 +22,6 @@ import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@ import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class parses the configured list of fencing methods, and
@@ -61,7 +61,7 @@ public class NodeFencer {
private static final Pattern HASH_COMMENT_RE =
Pattern.compile("#.*$");
- private static final Log LOG = LogFactory.getLog(NodeFencer.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NodeFencer.class);
/**
* Standard fencing methods included with Hadoop.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
index 64cd5a8..9ae113b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
@@ -23,8 +23,6 @@ import java.util.Collection;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import com.google.common.annotations.VisibleForTesting;
@@ -272,7 +270,7 @@ public class SshFenceByTcpPort extends Configured
* Adapter from JSch's logger interface to our log4j
*/
private static class LogAdapter implements com.jcraft.jsch.Logger {
- static final Log LOG = LogFactory.getLog(
+ static final Logger LOG = LoggerFactory.getLogger(
SshFenceByTcpPort.class.getName() + ".jsch");
@Override
@@ -285,9 +283,8 @@ public class SshFenceByTcpPort extends Configured
case com.jcraft.jsch.Logger.WARN:
return LOG.isWarnEnabled();
case com.jcraft.jsch.Logger.ERROR:
- return LOG.isErrorEnabled();
case com.jcraft.jsch.Logger.FATAL:
- return LOG.isFatalEnabled();
+ return LOG.isErrorEnabled();
default:
return false;
}
@@ -306,10 +303,8 @@ public class SshFenceByTcpPort extends Configured
LOG.warn(message);
break;
case com.jcraft.jsch.Logger.ERROR:
- LOG.error(message);
- break;
case com.jcraft.jsch.Logger.FATAL:
- LOG.fatal(message);
+ LOG.error(message);
break;
default:
break;
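After this change, JSch's ERROR and FATAL levels both fall through to SLF4J's error(), using the switch fall-through visible in the hunk. A self-contained sketch of the adapter as it reads after the patch (the class name is hypothetical; the level constants come from JSch's own Logger interface):

  import org.slf4j.LoggerFactory;

  class JschToSlf4jAdapter implements com.jcraft.jsch.Logger {
    private static final org.slf4j.Logger LOG =
        LoggerFactory.getLogger(JschToSlf4jAdapter.class);

    @Override
    public boolean isEnabled(int level) {
      switch (level) {
        case DEBUG: return LOG.isDebugEnabled();
        case INFO:  return LOG.isInfoEnabled();
        case WARN:  return LOG.isWarnEnabled();
        case ERROR:
        case FATAL: return LOG.isErrorEnabled(); // FATAL collapses onto ERROR
        default:    return false;
      }
    }

    @Override
    public void log(int level, String message) {
      switch (level) {
        case DEBUG: LOG.debug(message); break;
        case INFO:  LOG.info(message);  break;
        case WARN:  LOG.warn(message);  break;
        case ERROR:
        case FATAL: LOG.error(message); break;
        default: break;
      }
    }
  }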
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 055bcaa..20a4681 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -28,8 +28,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -56,11 +54,13 @@ import org.apache.zookeeper.data.ACL;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.LimitedPrivate("HDFS")
public abstract class ZKFailoverController {
- static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
+ static final Logger LOG = LoggerFactory.getLogger(ZKFailoverController.class);
public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
@@ -162,7 +162,7 @@ public abstract class ZKFailoverController {
public int run(final String[] args) throws Exception {
if (!localTarget.isAutoFailoverEnabled()) {
- LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
+ LOG.error("Automatic failover is not enabled for " + localTarget + "." +
" Please ensure that automatic failover is enabled in the " +
"configuration before running the ZK failover controller.");
return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
@@ -184,7 +184,7 @@ public abstract class ZKFailoverController {
}
});
} catch (RuntimeException rte) {
- LOG.fatal("The failover controller encounters runtime error: " + rte);
+ LOG.error("The failover controller encounters runtime error: " + rte);
throw (Exception)rte.getCause();
}
}
@@ -195,7 +195,7 @@ public abstract class ZKFailoverController {
try {
initZK();
} catch (KeeperException ke) {
- LOG.fatal("Unable to start failover controller. Unable to connect "
+ LOG.error("Unable to start failover controller. Unable to connect "
+ "to ZooKeeper quorum at " + zkQuorum + ". Please check the "
+ "configured value for " + ZK_QUORUM_KEY + " and ensure that "
+ "ZooKeeper is running.");
@@ -221,7 +221,7 @@ public abstract class ZKFailoverController {
}
if (!elector.parentZNodeExists()) {
- LOG.fatal("Unable to start failover controller. "
+ LOG.error("Unable to start failover controller. "
+ "Parent znode does not exist.\n"
+ "Run with -formatZK flag to initialize ZooKeeper.");
return ERR_CODE_NO_PARENT_ZNODE;
@@ -230,7 +230,7 @@ public abstract class ZKFailoverController {
try {
localTarget.checkFencingConfigured();
} catch (BadFencingConfigurationException e) {
- LOG.fatal("Fencing is not configured for " + localTarget + ".\n" +
+ LOG.error("Fencing is not configured for " + localTarget + ".\n" +
"You must configure a fencing method before using automatic " +
"failover.", e);
return ERR_CODE_NO_FENCER;
@@ -376,7 +376,7 @@ public abstract class ZKFailoverController {
}
private synchronized void fatalError(String err) {
- LOG.fatal("Fatal error occurred:" + err);
+ LOG.error("Fatal error occurred:" + err);
fatalError = err;
notifyAll();
}
@@ -395,7 +395,7 @@ public abstract class ZKFailoverController {
} catch (Throwable t) {
String msg = "Couldn't make " + localTarget + " active";
- LOG.fatal(msg, t);
+ LOG.error(msg, t);
recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
StringUtils.stringifyException(t)));
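The patch keeps the existing string concatenation in these messages. SLF4J's parameterized form is the usual follow-up cleanup, since it skips formatting when the level is disabled; a sketch of the two styles side by side (names are illustrative, not from the patch):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  class ParameterizedLoggingExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(ParameterizedLoggingExample.class);

    void report(Object localTarget, Throwable t) {
      // Concatenation, as kept by this patch: the string is always built.
      LOG.error("Couldn't make " + localTarget + " active", t);
      // Parameterized equivalent: formatting is skipped if ERROR is disabled
      // (rarely relevant for error(), but significant for debug()/trace()).
      LOG.error("Couldn't make {} active", localTarget, t);
    }
  }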
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
index 63bfbca..7f75582 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.ha.protocolPB;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ha.HAServiceProtocol;
@@ -42,6 +40,8 @@ import org.apache.hadoop.ipc.RPC;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class is used on the server side. Calls come across the wire for the
@@ -61,7 +61,7 @@ public class HAServiceProtocolServerSideTranslatorPB implements
TransitionToActiveResponseProto.newBuilder().build();
private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP =
TransitionToStandbyResponseProto.newBuilder().build();
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
HAServiceProtocolServerSideTranslatorPB.class);
public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index d7436b2..28b9bb0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -53,8 +53,6 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.servlet.ServletContainer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -103,6 +101,8 @@ import org.eclipse.jetty.util.MultiException;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.webapp.WebAppContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Create a Jetty embedded server to answer http requests. The primary goal is
@@ -117,7 +117,7 @@ import org.eclipse.jetty.webapp.WebAppContext;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class HttpServer2 implements FilterContainer {
- public static final Log LOG = LogFactory.getLog(HttpServer2.class);
+ public static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
public static final String HTTP_SCHEME = "http";
public static final String HTTPS_SCHEME = "https";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
index 9ca5b92..fc64697 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
@@ -29,11 +29,11 @@ import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.servlet.Filter;
@@ -47,7 +47,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_S
public class StaticUserWebFilter extends FilterInitializer {
static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
- private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(StaticUserWebFilter.class);
static class User implements Principal {
private final String name;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
index d4514c6..519fcd7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
@@ -22,8 +22,6 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -36,6 +34,8 @@ import org.apache.hadoop.util.bloom.DynamicBloomFilter;
import org.apache.hadoop.util.bloom.Filter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_KEY;
@@ -52,7 +52,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOO
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BloomMapFile {
- private static final Log LOG = LogFactory.getLog(BloomMapFile.class);
+ private static final Logger LOG = LoggerFactory.getLogger(BloomMapFile.class);
public static final String BLOOM_FILE_NAME = "bloom";
public static final int HASH_COUNT = 5;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
index 705678e..a2903f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
@@ -22,11 +22,10 @@ import java.nio.ByteOrder;
import java.security.AccessController;
import java.security.PrivilegedAction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
@@ -36,7 +35,7 @@ import com.google.common.primitives.UnsignedBytes;
* class to be able to compare arrays that start at non-zero offsets.
*/
abstract class FastByteComparisons {
- static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
+ static final Logger LOG = LoggerFactory.getLogger(FastByteComparisons.class);
/**
* Lexicographically compare two byte arrays.
[39/50] [abbrv] hadoop git commit: YARN-6778. In ResourceWeights, weights and setWeights() should be final. (Daniel Templeton via Yufei Gu)
Posted by xy...@apache.org.
YARN-6778. In ResourceWeights, weights and setWeights() should be final. (Daniel Templeton via Yufei Gu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/daaf530f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/daaf530f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/daaf530f
Branch: refs/heads/HDFS-7240
Commit: daaf530fce4b91cf9f568b9b0c5e8b20e6774134
Parents: 5aa2bf2
Author: Yufei Gu <yu...@apache.org>
Authored: Tue Jul 18 16:38:07 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Tue Jul 18 16:38:07 2017 -0700
----------------------------------------------------------------------
.../yarn/server/resourcemanager/resource/ResourceWeights.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/daaf530f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
index 4c62318..3ce1517 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.util.StringUtils;
public class ResourceWeights {
public static final ResourceWeights NEUTRAL = new ResourceWeights(1.0f);
- private float[] weights = new float[ResourceType.values().length];
+ private final float[] weights = new float[ResourceType.values().length];
public ResourceWeights(float memoryWeight, float cpuWeight) {
weights[ResourceType.MEMORY.ordinal()] = memoryWeight;
@@ -40,7 +40,7 @@ public class ResourceWeights {
public ResourceWeights() { }
- public void setWeight(float weight) {
+ public final void setWeight(float weight) {
for (int i = 0; i < weights.length; i++) {
weights[i] = weight;
}
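The usual rationale for making a setter like this final, presumably the motivation here as well, is that it is invoked from a constructor: a constructor calling an overridable method can dispatch to a subclass override before the subclass's fields are initialized. A minimal illustration of the hazard being prevented (hypothetical class, not from the patch):

  class Base {
    Base() {
      init(); // If init() were overridable, a subclass version could run
              // here, before the subclass's own fields are initialized.
    }
    final void init() { /* final: cannot be overridden, so this is safe */ }
  }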
[34/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.
Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index a3bccef..1574431 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -32,13 +32,12 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -49,7 +48,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class IOUtils {
- public static final Log LOG = LogFactory.getLog(IOUtils.class);
+ public static final Logger LOG = LoggerFactory.getLogger(IOUtils.class);
/**
* Copies from one stream to another.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index 908a893..2e21444 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -23,8 +23,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +35,8 @@ import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
@@ -60,7 +60,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SK
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapFile {
- private static final Log LOG = LogFactory.getLog(MapFile.class);
+ private static final Logger LOG = LoggerFactory.getLogger(MapFile.class);
/** The name of the index file. */
public static final String INDEX_FILE_NAME = "index";
@@ -1002,7 +1002,7 @@ public class MapFile {
while (reader.next(key, value)) // copy all entries
writer.append(key, value);
} finally {
- IOUtils.cleanup(LOG, writer, reader);
+ IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
}
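IOUtils.cleanup takes a commons-logging Log, so call sites that now hold an SLF4J Logger switch to cleanupWithLogger, as above. A hedged usage sketch of the SLF4J variant (the wrapper class and method are illustrative only):

  import java.io.Closeable;
  import org.apache.hadoop.io.IOUtils;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  class CleanupExample {
    private static final Logger LOG = LoggerFactory.getLogger(CleanupExample.class);

    void closeQuietly(Closeable writer, Closeable reader) {
      // Closes each argument in order, logging any failure through the
      // supplied SLF4J Logger rather than rethrowing it.
      IOUtils.cleanupWithLogger(LOG, writer, reader);
    }
  }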
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
index a8c0690..2e65f12 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
@@ -23,8 +23,6 @@ import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.nativeio.NativeIO;
@@ -33,6 +31,8 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Manages a pool of threads which can issue readahead requests on file descriptors.
@@ -40,7 +40,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReadaheadPool {
- static final Log LOG = LogFactory.getLog(ReadaheadPool.class);
+ static final Logger LOG = LoggerFactory.getLogger(ReadaheadPool.class);
private static final int POOL_SIZE = 4;
private static final int MAX_POOL_SIZE = 16;
private static final int CAPACITY = 1024;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 475d272..2cc0e40 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -25,7 +25,6 @@ import java.rmi.server.UID;
import java.security.MessageDigest;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.*;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.Options.CreateOpts;
@@ -51,6 +50,8 @@ import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.MergeSort;
import org.apache.hadoop.util.PriorityQueue;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -203,7 +204,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFile {
- private static final Log LOG = LogFactory.getLog(SequenceFile.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SequenceFile.class);
private SequenceFile() {} // no public ctor
@@ -1923,7 +1924,7 @@ public class SequenceFile {
succeeded = true;
} finally {
if (!succeeded) {
- IOUtils.cleanup(LOG, this.in);
+ IOUtils.cleanupWithLogger(LOG, this.in);
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
index 89f1e42..f5d33a1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
@@ -25,9 +25,10 @@ import java.io.UTFDataFormatException;
import org.apache.hadoop.util.StringUtils;
-import org.apache.commons.logging.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** A WritableComparable for strings that uses the UTF8 encoding.
*
@@ -42,7 +43,7 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Stable
public class UTF8 implements WritableComparable<UTF8> {
- private static final Log LOG= LogFactory.getLog(UTF8.class);
+ private static final Logger LOG= LoggerFactory.getLogger(UTF8.class);
private static final DataInputBuffer IBUF = new DataInputBuffer();
private static final ThreadLocal<DataOutputBuffer> OBUF_FACTORY =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
index 01bffa7..f103aad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
@@ -23,8 +23,6 @@ import java.util.Set;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -33,6 +31,8 @@ import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A global compressor/decompressor pool used to save and reuse
@@ -41,7 +41,7 @@ import com.google.common.cache.LoadingCache;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CodecPool {
- private static final Log LOG = LogFactory.getLog(CodecPool.class);
+ private static final Logger LOG = LoggerFactory.getLogger(CodecPool.class);
/**
* A global compressor pool used to save the expensive
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index 8fff75d..3701f20 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.io.compress;
import java.util.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -28,6 +26,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A factory that will find the correct codec for a given filename.
@@ -36,8 +36,8 @@ import org.apache.hadoop.util.StringUtils;
@InterfaceStability.Evolving
public class CompressionCodecFactory {
- public static final Log LOG =
- LogFactory.getLog(CompressionCodecFactory.class.getName());
+ public static final Logger LOG =
+ LoggerFactory.getLogger(CompressionCodecFactory.class.getName());
private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
ServiceLoader.load(CompressionCodec.class);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index 31196cc..33f39ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -22,14 +22,14 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -37,7 +37,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
- private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
+ private static final Logger LOG = LoggerFactory.getLogger(DefaultCodec.class);
Configuration conf;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
index a973dc9..d4a9787 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
@@ -24,9 +24,8 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the popular
@@ -42,7 +41,8 @@ public class Bzip2Compressor implements Compressor {
static final int DEFAULT_BLOCK_SIZE = 9;
static final int DEFAULT_WORK_FACTOR = 30;
- private static final Log LOG = LogFactory.getLog(Bzip2Compressor.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(Bzip2Compressor.class);
private long stream;
private int blockSize;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
index 3135165..96693ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
@@ -23,9 +23,8 @@ import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.compress.Decompressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link Decompressor} based on the popular
@@ -36,7 +35,8 @@ import org.apache.commons.logging.LogFactory;
public class Bzip2Decompressor implements Decompressor {
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
- private static final Log LOG = LogFactory.getLog(Bzip2Decompressor.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(Bzip2Decompressor.class);
private long stream;
private boolean conserveMemory;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
index 45f1edd..d24b4bf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.io.compress.bzip2;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A collection of factories to create the right
@@ -31,7 +31,7 @@ import org.apache.hadoop.util.NativeCodeLoader;
*
*/
public class Bzip2Factory {
- private static final Log LOG = LogFactory.getLog(Bzip2Factory.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Bzip2Factory.class);
private static String bzip2LibraryName = "";
private static boolean nativeBzip2Loaded;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
index ccfae8b..3792c36 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
@@ -22,19 +22,19 @@ import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the lz4 compression algorithm.
* http://code.google.com/p/lz4/
*/
public class Lz4Compressor implements Compressor {
- private static final Log LOG =
- LogFactory.getLog(Lz4Compressor.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(Lz4Compressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
index 685956c..f26ae84 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
@@ -22,18 +22,18 @@ import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link Decompressor} based on the lz4 compression algorithm.
* http://code.google.com/p/lz4/
*/
public class Lz4Decompressor implements Decompressor {
- private static final Log LOG =
- LogFactory.getLog(Lz4Compressor.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(Lz4Compressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
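Note that the hunk above carries over an existing quirk unchanged: the logger inside Lz4Decompressor is still created from Lz4Compressor.class.getName(), so decompressor log lines are attributed to the compressor's category. A per-class declaration would instead read as follows (sketch only; the wrapper class is hypothetical):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  class Lz4DecompressorLoggerSketch {
    // Names the decompressor's own category rather than the compressor's.
    private static final Logger LOG = LoggerFactory.getLogger(
        "org.apache.hadoop.io.compress.lz4.Lz4Decompressor");
  }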
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
index 814718d..3d38680 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
@@ -22,19 +22,19 @@ import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the snappy compression algorithm.
* http://code.google.com/p/snappy/
*/
public class SnappyCompressor implements Compressor {
- private static final Log LOG =
- LogFactory.getLog(SnappyCompressor.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SnappyCompressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
index 8712431..f31b76c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
@@ -22,19 +22,19 @@ import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link Decompressor} based on the snappy compression algorithm.
* http://code.google.com/p/snappy/
*/
public class SnappyDecompressor implements Decompressor {
- private static final Log LOG =
- LogFactory.getLog(SnappyDecompressor.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SnappyDecompressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
index 509456e..739788f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
@@ -23,9 +23,8 @@ import java.util.zip.Deflater;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A wrapper around java.util.zip.Deflater to make it conform
@@ -34,7 +33,8 @@ import org.apache.commons.logging.LogFactory;
*/
public class BuiltInZlibDeflater extends Deflater implements Compressor {
- private static final Log LOG = LogFactory.getLog(BuiltInZlibDeflater.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(BuiltInZlibDeflater.class);
public BuiltInZlibDeflater(int level, boolean nowrap) {
super(level, nowrap);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
index 6396fcb..438c8be 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
@@ -25,9 +25,8 @@ import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the popular
@@ -37,7 +36,8 @@ import org.apache.commons.logging.LogFactory;
*/
public class ZlibCompressor implements Compressor {
- private static final Log LOG = LogFactory.getLog(ZlibCompressor.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ZlibCompressor.class);
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
index 9d8e1d9..93b3b6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.io.compress.zlib;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
@@ -29,6 +27,8 @@ import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A collection of factories to create the right
@@ -36,8 +36,8 @@ import com.google.common.annotations.VisibleForTesting;
*
*/
public class ZlibFactory {
- private static final Log LOG =
- LogFactory.getLog(ZlibFactory.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ZlibFactory.class);
private static boolean nativeZlibLoaded = false;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
index 75b8fa5..69df56a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.io.erasurecode;
import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.codec.ErasureCodec;
@@ -31,6 +29,8 @@ import org.apache.hadoop.io.erasurecode.coder.ErasureEncoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
@@ -48,7 +48,7 @@ import java.lang.reflect.InvocationTargetException;
@InterfaceAudience.Private
public final class CodecUtil {
- private static final Log LOG = LogFactory.getLog(CodecUtil.class);
+ private static final Logger LOG = LoggerFactory.getLogger(CodecUtil.class);
public static final String IO_ERASURECODE_CODEC = "io.erasurecode.codec.";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
index 1c98f3c..cb462b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
@@ -18,17 +18,17 @@
package org.apache.hadoop.io.erasurecode;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Erasure code native libraries (for now, Intel ISA-L) related utilities.
*/
public final class ErasureCodeNative {
- private static final Log LOG =
- LogFactory.getLog(ErasureCodeNative.class.getName());
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ErasureCodeNative.class.getName());
/**
* The reason why ISA-L library is not available, or null if it is available.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
index ce93266..43d8299 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
@@ -30,8 +30,6 @@ import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -43,6 +41,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarComparator;
import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarLong;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Block Compressed file, the underlying physical storage layer for TFile.
@@ -54,7 +54,7 @@ final class BCFile {
// the current version of BCFile impl, increment them (major or minor) made
// enough changes
static final Version API_VERSION = new Version((short) 1, (short) 0);
- static final Log LOG = LogFactory.getLog(BCFile.class);
+ static final Logger LOG = LoggerFactory.getLogger(BCFile.class);
/**
* Prevent the instantiation of BCFile objects.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index 596b7ea..f82f4df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -24,8 +24,6 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
@@ -35,6 +33,8 @@ import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
@@ -44,7 +44,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
* Compression related stuff.
*/
final class Compression {
- static final Log LOG = LogFactory.getLog(Compression.class);
+ static final Logger LOG = LoggerFactory.getLogger(Compression.class);
/**
* Prevent the instantiation of class.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
index 56739c6..c63baa5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
@@ -29,8 +29,6 @@ import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Comparator;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -51,6 +49,8 @@ import org.apache.hadoop.io.file.tfile.CompareUtils.BytesComparator;
import org.apache.hadoop.io.file.tfile.CompareUtils.MemcmpRawComparator;
import org.apache.hadoop.io.file.tfile.Utils.Version;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A TFile is a container of key-value pairs. Both keys and values are type-less
@@ -131,7 +131,7 @@ import org.apache.hadoop.io.serializer.JavaSerializationComparator;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TFile {
- static final Log LOG = LogFactory.getLog(TFile.class);
+ static final Logger LOG = LoggerFactory.getLogger(TFile.class);
private static final String CHUNK_BUF_SIZE_ATTR = "tfile.io.chunk.size";
private static final String FS_INPUT_BUF_SIZE_ATTR =
@@ -335,7 +335,7 @@ public class TFile {
writerBCF.close();
}
} finally {
- IOUtils.cleanup(LOG, blkAppender, writerBCF);
+ IOUtils.cleanupWithLogger(LOG, blkAppender, writerBCF);
blkAppender = null;
writerBCF = null;
state = State.CLOSED;
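The finally-block change above is not just a rename: IOUtils.cleanup(Log, Closeable...) is typed against the commons-logging Log, so code holding an slf4j Logger must call the cleanupWithLogger overload instead. A short usage sketch, with hypothetical stream names:

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class CopyExample {
  private static final Logger LOG = LoggerFactory.getLogger(CopyExample.class);

  void copy(Closeable in, Closeable out) {
    try {
      // ... work with the streams ...
    } finally {
      // Closes each non-null argument in order, logging rather than
      // rethrowing any IOException raised by close().
      IOUtils.cleanupWithLogger(LOG, in, out);
    }
  }
}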
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
index 84b92ec..3ef6b27 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
@@ -25,8 +25,6 @@ import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -36,12 +34,14 @@ import org.apache.hadoop.io.file.tfile.BCFile.BlockRegion;
import org.apache.hadoop.io.file.tfile.BCFile.MetaIndexEntry;
import org.apache.hadoop.io.file.tfile.TFile.TFileIndexEntry;
import org.apache.hadoop.io.file.tfile.Utils.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Dumping the information of a TFile.
*/
class TFileDumper {
- static final Log LOG = LogFactory.getLog(TFileDumper.class);
+ static final Logger LOG = LoggerFactory.getLogger(TFileDumper.class);
private TFileDumper() {
// namespace object not constructable.
@@ -290,7 +290,7 @@ class TFileDumper {
}
}
} finally {
- IOUtils.cleanup(LOG, reader, fsdis);
+ IOUtils.cleanupWithLogger(LOG, reader, fsdis);
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index b51c905..84cd42c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -40,9 +40,9 @@ import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.PerformanceAdvisory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
import com.google.common.annotations.VisibleForTesting;
@@ -98,7 +98,7 @@ public class NativeIO {
write. */
public static int SYNC_FILE_RANGE_WAIT_AFTER = 4;
- private static final Log LOG = LogFactory.getLog(NativeIO.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
// Set to true via JNI if possible
public static boolean fadvisePossible = false;
@@ -634,7 +634,7 @@ public class NativeIO {
}
}
- private static final Log LOG = LogFactory.getLog(NativeIO.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
private static boolean nativeLoaded = false;
@@ -940,10 +940,10 @@ public class NativeIO {
position += transferred;
}
} finally {
- IOUtils.cleanup(LOG, output);
- IOUtils.cleanup(LOG, fos);
- IOUtils.cleanup(LOG, input);
- IOUtils.cleanup(LOG, fis);
+ IOUtils.cleanupWithLogger(LOG, output);
+ IOUtils.cleanupWithLogger(LOG, fos);
+ IOUtils.cleanupWithLogger(LOG, input);
+ IOUtils.cleanupWithLogger(LOG, fis);
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
index 306244a..4126344 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
@@ -22,10 +22,10 @@ import java.io.IOException;
import java.io.FileDescriptor;
import org.apache.commons.lang.SystemUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A factory for creating shared file descriptors inside a given directory.
@@ -45,7 +45,8 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SharedFileDescriptorFactory {
- public static final Log LOG = LogFactory.getLog(SharedFileDescriptorFactory.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SharedFileDescriptorFactory.class);
private final String prefix;
private final String path;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index d6f3e04..fa0cb6e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -32,8 +32,6 @@ import java.util.Map.Entry;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
@@ -41,6 +39,8 @@ import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <p>
@@ -49,7 +49,7 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class RetryPolicies {
- public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
+ public static final Logger LOG = LoggerFactory.getLogger(RetryPolicies.class);
/**
* <p>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
index 15a9b54..1f5acfe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
@@ -19,17 +19,17 @@ package org.apache.hadoop.io.retry;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
import org.apache.hadoop.ipc.RemoteException;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.ipc.RetriableException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class RetryUtils {
- public static final Log LOG = LogFactory.getLog(RetryUtils.class);
+ public static final Logger LOG = LoggerFactory.getLogger(RetryUtils.class);
/**
* Return the default retry policy set in conf.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
index 3f177f8..969ca3a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.io.serializer;
import java.util.ArrayList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
import org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <p>
@@ -41,8 +41,8 @@ import org.apache.hadoop.util.ReflectionUtils;
@InterfaceStability.Evolving
public class SerializationFactory extends Configured {
- static final Log LOG =
- LogFactory.getLog(SerializationFactory.class.getName());
+ static final Logger LOG =
+ LoggerFactory.getLogger(SerializationFactory.class.getName());
private List<Serialization<?>> serializations = new ArrayList<Serialization<?>>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
index 2764788..d1bd180 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
@@ -28,20 +28,21 @@ import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Abstracts queue operations for different blocking queues.
*/
public class CallQueueManager<E extends Schedulable>
extends AbstractQueue<E> implements BlockingQueue<E> {
- public static final Log LOG = LogFactory.getLog(CallQueueManager.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(CallQueueManager.class);
// Number of checkpoints for empty queue.
private static final int CHECKPOINT_NUM = 20;
// Interval to check empty queue.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 6b21c75..1daf803 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.ipc;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability;
@@ -57,6 +55,8 @@ import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.AsyncGet;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.net.SocketFactory;
import javax.security.sasl.Sasl;
@@ -84,7 +84,7 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
@InterfaceStability.Evolving
public class Client implements AutoCloseable {
- public static final Log LOG = LogFactory.getLog(Client.class);
+ public static final Logger LOG = LoggerFactory.getLogger(Client.class);
/** A counter for generating call IDs. */
private static final AtomicInteger callIdCounter = new AtomicInteger();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index 8bcaf05..20161b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -33,11 +33,11 @@ import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.NotImplementedException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
import org.apache.hadoop.metrics2.util.MBeans;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A queue with multiple levels for each priority.
@@ -50,7 +50,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
public static final String IPC_CALLQUEUE_PRIORITY_LEVELS_KEY =
"faircallqueue.priority-levels";
- public static final Log LOG = LogFactory.getLog(FairCallQueue.class);
+ public static final Logger LOG = LoggerFactory.getLogger(FairCallQueue.class);
/* The queues */
private final ArrayList<BlockingQueue<E>> queues;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index b3f5458..639bbad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.ipc;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.*;
import com.google.protobuf.Descriptors.MethodDescriptor;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -39,6 +37,8 @@ import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.AsyncGet;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.net.SocketFactory;
import java.io.IOException;
@@ -55,7 +55,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
*/
@InterfaceStability.Evolving
public class ProtobufRpcEngine implements RpcEngine {
- public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(ProtobufRpcEngine.class);
private static final ThreadLocal<AsyncGet<Message, Exception>>
ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index e16a8f5..8f8eda6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -38,8 +38,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import javax.net.SocketFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -60,6 +58,8 @@ import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import com.google.protobuf.BlockingService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** A simple RPC mechanism.
*
@@ -110,7 +110,7 @@ public class RPC {
Writable rpcRequest, long receiveTime) throws Exception ;
}
- static final Log LOG = LogFactory.getLog(RPC.class);
+ static final Logger LOG = LoggerFactory.getLogger(RPC.class);
/**
* Get all superInterfaces that extend VersionedProtocol
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
index ee84a04..e67e8d9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
@@ -24,9 +24,9 @@ import com.google.common.base.Joiner;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Used to registry custom methods to refresh at runtime.
@@ -34,7 +34,8 @@ import org.apache.hadoop.classification.InterfaceStability;
*/
@InterfaceStability.Unstable
public class RefreshRegistry {
- public static final Log LOG = LogFactory.getLog(RefreshRegistry.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(RefreshRegistry.class);
// Used to hold singleton instance
private static class RegistryHolder {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
index 7b85286..6f6ceb5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
@@ -22,8 +22,6 @@ import java.util.Arrays;
import java.util.UUID;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.util.LightWeightCache;
@@ -32,6 +30,8 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Maintains a cache of non-idempotent requests that have been successfully
@@ -44,7 +44,7 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public class RetryCache {
- public static final Log LOG = LogFactory.getLog(RetryCache.class);
+ public static final Logger LOG = LoggerFactory.getLogger(RetryCache.class);
private final RetryCacheMetrics retryCacheMetrics;
private static final int MAX_CAPACITY = 16;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index df108b8..d0694fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -70,8 +70,6 @@ import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -125,6 +123,8 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Message;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** An abstract IPC service. IPC calls take a single {@link Writable} as a
* parameter, and return a {@link Writable} as their value. A service runs on
@@ -293,9 +293,9 @@ public abstract class Server {
}
- public static final Log LOG = LogFactory.getLog(Server.class);
- public static final Log AUDITLOG =
- LogFactory.getLog("SecurityLogger."+Server.class.getName());
+ public static final Logger LOG = LoggerFactory.getLogger(Server.class);
+ public static final Logger AUDITLOG =
+ LoggerFactory.getLogger("SecurityLogger."+Server.class.getName());
private static final String AUTH_FAILED_FOR = "Auth failed for ";
private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
@@ -1113,7 +1113,7 @@ public abstract class Server {
} catch (IOException ex) {
LOG.error("Error in Reader", ex);
} catch (Throwable re) {
- LOG.fatal("Bug in read selector!", re);
+ LOG.error("Bug in read selector!", re);
ExitUtil.terminate(1, "Bug in read selector!");
}
}
@@ -2692,7 +2692,7 @@ public abstract class Server {
}
} finally {
CurCall.set(null);
- IOUtils.cleanup(LOG, traceScope);
+ IOUtils.cleanupWithLogger(LOG, traceScope);
}
}
LOG.debug(Thread.currentThread().getName() + ": exiting");
@@ -2701,7 +2701,7 @@ public abstract class Server {
}
@VisibleForTesting
- void logException(Log logger, Throwable e, Call call) {
+ void logException(Logger logger, Throwable e, Call call) {
if (exceptionsHandler.isSuppressedLog(e.getClass())) {
return; // Log nothing.
}
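Server.java also shows the two places where the migration cannot be purely mechanical: slf4j's Logger has no fatal() method, so the fatal-level call becomes error() (the process still terminates via ExitUtil immediately afterwards), and helper methods such as logException that took a commons-logging Log must change their signatures to accept an slf4j Logger. A hedged sketch of both points, with a hypothetical helper body:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class SelectorExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(SelectorExample.class);

  void onBug(Throwable re) {
    // commons-logging offered LOG.fatal(msg, t); slf4j tops out at error().
    LOG.error("Bug in read selector!", re);
  }

  // Helpers now take org.slf4j.Logger where they used to take Log.
  void logException(Logger logger, Throwable e) {
    // slf4j supports parameterized messages, which avoid building the
    // message string when the level is disabled:
    logger.warn("call failed: {}", e.toString());
  }
}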
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
index cfda947..d308725 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
@@ -20,9 +20,9 @@ package org.apache.hadoop.ipc;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Determines which queue to start reading from, occasionally drawing from
@@ -43,8 +43,8 @@ public class WeightedRoundRobinMultiplexer implements RpcMultiplexer {
public static final String IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY =
"faircallqueue.multiplexer.weights";
- public static final Log LOG =
- LogFactory.getLog(WeightedRoundRobinMultiplexer.class);
+ public static final Logger LOG =
+ LoggerFactory.getLogger(WeightedRoundRobinMultiplexer.class);
private final int numQueues; // The number of queues under our provisioning
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index 1763c7f..fa0726d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -28,8 +28,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import javax.net.SocketFactory;
-import org.apache.commons.logging.*;
-
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
@@ -43,12 +41,14 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
@Deprecated
public class WritableRpcEngine implements RpcEngine {
- private static final Log LOG = LogFactory.getLog(RPC.class);
+ private static final Logger LOG = LoggerFactory.getLogger(RPC.class);
//writableRpcVersion should be updated if there is a change
//in format of the rpc messages.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java
index a853d64..fc09e0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.ipc.metrics;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
@@ -26,6 +24,8 @@ import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class is for maintaining the various RetryCache-related statistics
@@ -35,7 +35,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
@Metrics(about="Aggregate RetryCache metrics", context="rpc")
public class RetryCacheMetrics {
- static final Log LOG = LogFactory.getLog(RetryCacheMetrics.class);
+ static final Logger LOG = LoggerFactory.getLogger(RetryCacheMetrics.class);
final MetricsRegistry registry;
final String name;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
index 8b7e995..6ed57ec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
@@ -17,14 +17,14 @@
*/
package org.apache.hadoop.ipc.metrics;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class is for maintaining RPC method related statistics
@@ -37,7 +37,7 @@ public class RpcDetailedMetrics {
@Metric MutableRatesWithAggregation rates;
@Metric MutableRatesWithAggregation deferredRpcRates;
- static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
+ static final Logger LOG = LoggerFactory.getLogger(RpcDetailedMetrics.class);
final MetricsRegistry registry;
final String name;
@@ -45,7 +45,7 @@ public class RpcDetailedMetrics {
name = "RpcDetailedActivityForPort"+ port;
registry = new MetricsRegistry("rpcdetailed")
.tag("port", "RPC port", String.valueOf(port));
- LOG.debug(registry.info());
+ LOG.debug(registry.info().toString());
}
public String name() { return name; }
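The added toString() here, and in MBeanInfoBuilder and MetricsConfig below, is forced by the API rather than stylistic: commons-logging declares debug(Object), while slf4j's Logger declares debug(String), so passing a non-String argument no longer compiles. A sketch (the info parameter is hypothetical):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class DebugExample {
  private static final Logger LOG = LoggerFactory.getLogger(DebugExample.class);

  void dump(Object info) {
    // Direct port: convert explicitly. Note this builds the string even
    // when debug logging is disabled.
    LOG.debug(info.toString());
    // Idiomatic slf4j alternative: the placeholder defers formatting
    // until the level is known to be enabled.
    LOG.debug("{}", info);
  }
}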
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
index 8ce1379..d53d7d3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.ipc.metrics;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,6 +29,8 @@ import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class is for maintaining the various RPC statistics
@@ -40,7 +40,7 @@ import org.apache.hadoop.metrics2.lib.MutableRate;
@Metrics(about="Aggregate RPC metrics", context="rpc")
public class RpcMetrics {
- static final Log LOG = LogFactory.getLog(RpcMetrics.class);
+ static final Logger LOG = LoggerFactory.getLogger(RpcMetrics.class);
final Server server;
final MetricsRegistry registry;
final String name;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
index a8d9fa7..33af448 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.jmx;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.http.HttpServer2;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
@@ -116,7 +116,8 @@ import java.util.Set;
*
*/
public class JMXJsonServlet extends HttpServlet {
- private static final Log LOG = LogFactory.getLog(JMXJsonServlet.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(JMXJsonServlet.class);
static final String ACCESS_CONTROL_ALLOW_METHODS =
"Access-Control-Allow-Methods";
static final String ACCESS_CONTROL_ALLOW_ORIGIN =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
index a76acac..5282119 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MBeanInfoBuilder.java
@@ -106,7 +106,7 @@ class MBeanInfoBuilder implements MetricsVisitor {
}
++curRecNo;
}
- MetricsSystemImpl.LOG.debug(attrs);
+ MetricsSystemImpl.LOG.debug(attrs.toString());
MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
return new MBeanInfo(name, description, attrs.toArray(attrsArray),
null, null, null); // no ops/ctors/notifications
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index 8d834d2..ac4a24e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -41,18 +41,18 @@ import org.apache.commons.configuration2.builder.fluent.Configurations;
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
import org.apache.commons.configuration2.ex.ConfigurationException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsPlugin;
import org.apache.hadoop.metrics2.filter.GlobFilter;
import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Metrics configuration for MetricsSystemImpl
*/
class MetricsConfig extends SubsetConfiguration {
- static final Log LOG = LogFactory.getLog(MetricsConfig.class);
+ static final Logger LOG = LoggerFactory.getLogger(MetricsConfig.class);
static final String DEFAULT_FILE_NAME = "hadoop-metrics2.properties";
static final String PREFIX_DEFAULT = "*.";
@@ -121,7 +121,7 @@ class MetricsConfig extends SubsetConfiguration {
LOG.info("loaded properties from "+ fname);
LOG.debug(toString(cf));
MetricsConfig mc = new MetricsConfig(cf, prefix);
- LOG.debug(mc);
+ LOG.debug(mc.toString());
return mc;
} catch (ConfigurationException e) {
// Commons Configuration defines the message text when file not found
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
index b2f3c4a..1199ebd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java
@@ -24,8 +24,6 @@ import java.util.concurrent.*;
import static com.google.common.base.Preconditions.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -36,13 +34,16 @@ import static org.apache.hadoop.metrics2.util.Contracts.*;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An adapter class for metrics sink and associated filters
*/
class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
- private final Log LOG = LogFactory.getLog(MetricsSinkAdapter.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MetricsSinkAdapter.class);
private final String name, description, context;
private final MetricsSink sink;
private final MetricsFilter sourceFilter, recordFilter, metricFilter;
@@ -207,7 +208,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
stopping = true;
sinkThread.interrupt();
if (sink instanceof Closeable) {
- IOUtils.cleanup(LOG, (Closeable)sink);
+ IOUtils.cleanupWithLogger(LOG, (Closeable)sink);
}
try {
sinkThread.join();
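One further change in MetricsSinkAdapter above: the logger moves from a per-instance field to the conventional static final form, so all instances of the class share one logger reference instead of each constructor repeating the lookup. Sketch:

class SinkExample {
  // One logger per class, resolved once at class initialization, rather
  // than a redundant (identical) lookup for every instance constructed.
  private static final org.slf4j.Logger LOG =
      org.slf4j.LoggerFactory.getLogger(SinkExample.class);
}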
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
index 3406ace..f12ec67 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
@@ -33,8 +33,6 @@ import static com.google.common.base.Preconditions.*;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsFilter;
@@ -43,6 +41,8 @@ import org.apache.hadoop.metrics2.MetricsTag;
import static org.apache.hadoop.metrics2.impl.MetricsConfig.*;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.metrics2.util.Contracts.*;
@@ -51,7 +51,8 @@ import static org.apache.hadoop.metrics2.util.Contracts.*;
*/
class MetricsSourceAdapter implements DynamicMBean {
- private static final Log LOG = LogFactory.getLog(MetricsSourceAdapter.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MetricsSourceAdapter.class);
private final String prefix, name;
private final MetricsSource source;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index 24173f5..2248122 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -36,8 +36,6 @@ import com.google.common.annotations.VisibleForTesting;
import static com.google.common.base.Preconditions.*;
import org.apache.commons.configuration2.PropertiesConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.commons.math3.util.ArithmeticUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
@@ -62,6 +60,8 @@ import org.apache.hadoop.metrics2.lib.MutableStat;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A base class for metrics system singletons
@@ -70,7 +70,7 @@ import org.apache.hadoop.util.Time;
@Metrics(context="metricssystem")
public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
- static final Log LOG = LogFactory.getLog(MetricsSystemImpl.class);
+ static final Logger LOG = LoggerFactory.getLogger(MetricsSystemImpl.class);
static final String MS_NAME = "MetricsSystem";
static final String MS_STATS_NAME = MS_NAME +",sub=Stats";
static final String MS_STATS_DESC = "Metrics system metrics";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
index 51b2e66..3d7a90e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
@@ -22,20 +22,21 @@ import java.lang.reflect.Method;
import static com.google.common.base.Preconditions.*;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.annotation.Metric;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import static org.apache.hadoop.metrics2.util.Contracts.*;
/**
* Metric generated from a method, mostly used by annotation
*/
class MethodMetric extends MutableMetric {
- private static final Log LOG = LogFactory.getLog(MethodMetric.class);
+ private static final Logger LOG = LoggerFactory.getLogger(MethodMetric.class);
private final Object obj;
private final Method method;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java
index 0669297..1fcede4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java
@@ -24,8 +24,6 @@ import java.lang.reflect.Method;
import static com.google.common.base.Preconditions.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsException;
@@ -34,6 +32,8 @@ import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Helper class to build {@link MetricsSource} object from annotations.
@@ -49,7 +49,8 @@ import org.apache.hadoop.util.ReflectionUtils;
*/
@InterfaceAudience.Private
public class MetricsSourceBuilder {
- private static final Log LOG = LogFactory.getLog(MetricsSourceBuilder.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MetricsSourceBuilder.class);
private final Object source;
private final MutableMetricsFactory factory;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
index 8b4b083..b2042e7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
@@ -22,19 +22,20 @@ import java.lang.reflect.Field;
import java.lang.reflect.Method;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class MutableMetricsFactory {
- private static final Log LOG = LogFactory.getLog(MutableMetricsFactory.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MutableMetricsFactory.class);
MutableMetric newForField(Field field, Metric annotation,
MetricsRegistry registry) {
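The change in these hunks is mechanical and repeats across the metrics2
classes: swap the commons-logging Log/LogFactory pair for SLF4J's
Logger/LoggerFactory. A minimal before/after sketch of the pattern
(MyMetricsClass is a hypothetical stand-in, not a class from this patch):

    // Before: commons-logging
    //   import org.apache.commons.logging.Log;
    //   import org.apache.commons.logging.LogFactory;
    //   static final Log LOG = LogFactory.getLog(MyMetricsClass.class);

    // After: SLF4J
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class MyMetricsClass {
      private static final Logger LOG =
          LoggerFactory.getLogger(MyMetricsClass.class);

      void start() {
        // SLF4J's parameterized messages skip string concatenation
        // entirely when the level is disabled.
        LOG.debug("Started {} with {} sources", "metrics", 3);
      }
    }

Beyond the API swap, the parameterized form is the main practical win:
guards like if (LOG.isDebugEnabled()) become unnecessary for simple
messages.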
[38/50] [abbrv] hadoop git commit: HADOOP-14637. GenericTestUtils.waitFor needs to check condition again after max wait time. Contributed by Daniel Templeton
HADOOP-14637. GenericTestUtils.waitFor needs to check condition again after max wait time. Contributed by Daniel Templeton
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5aa2bf23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5aa2bf23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5aa2bf23
Branch: refs/heads/HDFS-7240
Commit: 5aa2bf231f40423865f0054ca27426ceb95ab4ba
Parents: f5f14a2
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Tue Jul 18 16:23:41 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Tue Jul 18 16:23:41 2017 -0500
----------------------------------------------------------------------
.../apache/hadoop/test/GenericTestUtils.java | 39 ++++++++++++++------
1 file changed, 27 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aa2bf23/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 82a5e08..38a0c6c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -335,25 +335,40 @@ public abstract class GenericTestUtils {
}
}
+ /**
+ * Wait for the specified test to return true. The test will be performed
+ * initially and then every {@code checkEveryMillis} until at least
+ * {@code waitForMillis} time has expired. If {@code check} is null or
+ * {@code waitForMillis} is less than {@code checkEveryMillis} this method
+ * will throw an {@link IllegalArgumentException}.
+ *
+ * @param check the test to perform
+ * @param checkEveryMillis how often to perform the test
+ * @param waitForMillis the amount of time after which no more tests will be
+ * performed
+ * @throws TimeoutException if the test does not return true in the allotted
+ * time
+ * @throws InterruptedException if the method is interrupted while waiting
+ */
public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
int waitForMillis) throws TimeoutException, InterruptedException {
Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
- Preconditions.checkArgument(waitForMillis > checkEveryMillis,
+ Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
ERROR_INVALID_ARGUMENT);
long st = Time.now();
- do {
- boolean result = check.get();
- if (result) {
- return;
- }
-
+ boolean result = check.get();
+
+ while (!result && (Time.now() - st < waitForMillis)) {
Thread.sleep(checkEveryMillis);
- } while (Time.now() - st < waitForMillis);
-
- throw new TimeoutException("Timed out waiting for condition. " +
- "Thread diagnostics:\n" +
- TimedOutTestsListener.buildThreadDiagnosticString());
+ result = check.get();
+ }
+
+ if (!result) {
+ throw new TimeoutException("Timed out waiting for condition. " +
+ "Thread diagnostics:\n" +
+ TimedOutTestsListener.buildThreadDiagnosticString());
+ }
}
/**
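A usage sketch of the revised contract: waitForMillis == checkEveryMillis
is now accepted (>= instead of >), and the condition is evaluated one
final time after the deadline, so a flag that flips just before the
deadline no longer produces a spurious TimeoutException. The
AtomicBoolean here is a hypothetical stand-in for real test state:

    import java.util.concurrent.atomic.AtomicBoolean;
    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForExample {
      public static void main(String[] args) throws Exception {
        final AtomicBoolean done = new AtomicBoolean(false);
        new Thread(() -> {
          try {
            Thread.sleep(450);      // flips just before the deadline
          } catch (InterruptedException ignored) {
          }
          done.set(true);
        }).start();
        // Poll every 100 ms for up to 500 ms; the final re-check after
        // the wait expires is what the fix adds.
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return done.get();
          }
        }, 100, 500);
      }
    }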
[27/50] [abbrv] hadoop git commit: HADOOP-14640. Azure: Support affinity for service running on localhost and reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak.
HADOOP-14640. Azure: Support affinity for service running on localhost and reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token generation. Contributed by Santhosh G Nayak.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0e78ae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0e78ae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0e78ae0
Branch: refs/heads/HDFS-7240
Commit: b0e78ae085928c82ae63a05a29a628c2e289c0fc
Parents: fb3b5d3
Author: Jitendra Pandey <ji...@apache.org>
Authored: Mon Jul 17 02:27:55 2017 -0700
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Mon Jul 17 02:27:55 2017 -0700
----------------------------------------------------------------------
.../fs/azure/RemoteSASKeyGeneratorImpl.java | 8 +-
.../fs/azure/RemoteWasbAuthorizerImpl.java | 8 +-
.../fs/azure/SecureWasbRemoteCallHelper.java | 86 ++++++++++++--------
.../hadoop/fs/azure/WasbRemoteCallHelper.java | 61 +++++++++++---
.../hadoop/fs/azure/security/Constants.java | 19 +++--
.../RemoteWasbDelegationTokenManager.java | 27 +++---
.../hadoop/fs/azure/security/SpnegoToken.java | 49 +++++++++++
.../fs/azure/TestWasbRemoteCallHelper.java | 58 ++++++++++++-
8 files changed, 245 insertions(+), 71 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index 87f3b0b..a7cedea 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -105,10 +105,11 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
*/
private static final String
SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
- "1000,3,10000,2";
+ "10,3,100,2";
private WasbRemoteCallHelper remoteCallHelper = null;
private boolean isKerberosSupportEnabled;
+ private boolean isSpnegoTokenCacheEnabled;
private RetryPolicy retryPolicy;
private String[] commaSeparatedUrls;
@@ -127,13 +128,16 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
this.isKerberosSupportEnabled =
conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
+ this.isSpnegoTokenCacheEnabled =
+ conf.getBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
this.commaSeparatedUrls = conf.getTrimmedStrings(KEY_CRED_SERVICE_URLS);
if (this.commaSeparatedUrls == null || this.commaSeparatedUrls.length <= 0) {
throw new IOException(
KEY_CRED_SERVICE_URLS + " config not set" + " in configuration.");
}
if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
- this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false);
+ this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false,
+ isSpnegoTokenCacheEnabled);
} else {
this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index e2d515c..cd4e0a3 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -93,10 +93,11 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
* Authorization Remote http client retry policy spec default value. {@value}
*/
private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
- "1000,3,10000,2";
+ "10,3,100,2";
private WasbRemoteCallHelper remoteCallHelper = null;
private boolean isKerberosSupportEnabled;
+ private boolean isSpnegoTokenCacheEnabled;
private RetryPolicy retryPolicy;
private String[] commaSeparatedUrls = null;
@@ -111,6 +112,8 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
LOG.debug("Initializing RemoteWasbAuthorizerImpl instance");
this.isKerberosSupportEnabled =
conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
+ this.isSpnegoTokenCacheEnabled =
+ conf.getBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
this.commaSeparatedUrls =
conf.getTrimmedStrings(KEY_REMOTE_AUTH_SERVICE_URLS);
if (this.commaSeparatedUrls == null
@@ -123,7 +126,8 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_SPEC,
AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
- this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false);
+ this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false,
+ isSpnegoTokenCacheEnabled);
} else {
this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
index 7f8bc0e..a0204be 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.azure;
import org.apache.commons.lang.Validate;
import org.apache.hadoop.fs.azure.security.Constants;
+import org.apache.hadoop.fs.azure.security.SpnegoToken;
import org.apache.hadoop.fs.azure.security.WasbDelegationTokenIdentifier;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.security.UserGroupInformation;
@@ -39,6 +40,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.net.InetAddress;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.List;
@@ -69,10 +71,21 @@ public class SecureWasbRemoteCallHelper extends WasbRemoteCallHelper {
*/
private boolean alwaysRequiresKerberosAuth;
+ /**
+ * Enable caching of Spnego token.
+ */
+ private boolean isSpnegoTokenCachingEnabled;
+
+ /**
+ * Cached SPNEGO token.
+ */
+ private SpnegoToken spnegoToken;
+
public SecureWasbRemoteCallHelper(RetryPolicy retryPolicy,
- boolean alwaysRequiresKerberosAuth) {
+ boolean alwaysRequiresKerberosAuth, boolean isSpnegoTokenCachingEnabled) {
super(retryPolicy);
this.alwaysRequiresKerberosAuth = alwaysRequiresKerberosAuth;
+ this.isSpnegoTokenCachingEnabled = isSpnegoTokenCachingEnabled;
}
@Override
@@ -81,32 +94,6 @@ public class SecureWasbRemoteCallHelper extends WasbRemoteCallHelper {
final String httpMethod) throws IOException {
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
UserGroupInformation connectUgi = ugi.getRealUser();
- if (connectUgi == null) {
- connectUgi = ugi;
- }
- if (delegationToken == null) {
- connectUgi.checkTGTAndReloginFromKeytab();
- }
- String s = null;
- try {
- s = connectUgi.doAs(new PrivilegedExceptionAction<String>() {
- @Override public String run() throws Exception {
- return retryableRequest(urls, path, queryParams, httpMethod);
- }
- });
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new IOException(e.getMessage(), e);
- }
- return s;
- }
-
- @Override
- public HttpUriRequest getHttpRequest(String[] urls, String path,
- List<NameValuePair> queryParams, int urlIndex, String httpMethod)
- throws URISyntaxException, IOException {
- final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- UserGroupInformation connectUgi = ugi.getRealUser();
if (connectUgi != null) {
queryParams.add(new NameValuePair() {
@Override public String getName() {
@@ -117,6 +104,8 @@ public class SecureWasbRemoteCallHelper extends WasbRemoteCallHelper {
return ugi.getShortUserName();
}
});
+ } else {
+ connectUgi = ugi;
}
final Token delegationToken = getDelegationToken(ugi);
@@ -134,8 +123,32 @@ public class SecureWasbRemoteCallHelper extends WasbRemoteCallHelper {
});
}
+ if (delegationToken == null) {
+ connectUgi.checkTGTAndReloginFromKeytab();
+ }
+ String s = null;
+ try {
+ s = connectUgi.doAs(new PrivilegedExceptionAction<String>() {
+ @Override public String run() throws Exception {
+ return retryableRequest(urls, path, queryParams, httpMethod);
+ }
+ });
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new IOException(e.getMessage(), e);
+ }
+ return s;
+ }
+
+ @Override
+ public HttpUriRequest getHttpRequest(String[] urls, String path,
+ List<NameValuePair> queryParams, int urlIndex, String httpMethod,
+ boolean requiresNewAuth) throws URISyntaxException, IOException {
URIBuilder uriBuilder =
new URIBuilder(urls[urlIndex]).setPath(path).setParameters(queryParams);
+ if (uriBuilder.getHost().equals("localhost")) {
+ uriBuilder.setHost(InetAddress.getLocalHost().getCanonicalHostName());
+ }
HttpUriRequest httpUriRequest = null;
switch (httpMethod) {
case HttpPut.METHOD_NAME:
@@ -152,11 +165,18 @@ public class SecureWasbRemoteCallHelper extends WasbRemoteCallHelper {
LOG.debug("SecureWasbRemoteCallHelper#getHttpRequest() {}",
uriBuilder.build().toURL());
if (alwaysRequiresKerberosAuth || delegationToken == null) {
- AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+ AuthenticatedURL.Token token = null;
final Authenticator kerberosAuthenticator =
new KerberosDelegationTokenAuthenticator();
try {
- kerberosAuthenticator.authenticate(uriBuilder.build().toURL(), token);
+ if (isSpnegoTokenCachingEnabled && !requiresNewAuth
+ && spnegoToken != null && spnegoToken.isTokenValid()){
+ token = spnegoToken.getToken();
+ } else {
+ token = new AuthenticatedURL.Token();
+ kerberosAuthenticator.authenticate(uriBuilder.build().toURL(), token);
+ spnegoToken = new SpnegoToken(token);
+ }
} catch (AuthenticationException e) {
throw new WasbRemoteCallException(
Constants.AUTHENTICATION_FAILED_ERROR_MESSAGE, e);
@@ -170,7 +190,7 @@ public class SecureWasbRemoteCallHelper extends WasbRemoteCallHelper {
return httpUriRequest;
}
- private synchronized Token<?> getDelegationToken(
+ private Token<?> getDelegationToken(
UserGroupInformation userGroupInformation) throws IOException {
if (this.delegationToken == null) {
Token<?> token = null;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
index 7c26e8a..606c3f0 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -40,6 +40,7 @@ import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.InterruptedIOException;
+import java.net.InetAddress;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.List;
@@ -84,8 +85,7 @@ public class WasbRemoteCallHelper {
this.retryPolicy = retryPolicy;
}
- @VisibleForTesting
- public void updateHttpClient(HttpClient client) {
+ @VisibleForTesting public void updateHttpClient(HttpClient client) {
this.client = client;
}
@@ -111,25 +111,57 @@ public class WasbRemoteCallHelper {
HttpResponse response = null;
HttpUriRequest httpRequest = null;
- for (int retry = 0, index =
- random.nextInt(urls.length);; retry++, index++) {
+ /**
+ * Get the index of the local url, if any. If the list of urls contains a
+ * string starting with "https://localhost:" or "http://localhost:", treat
+ * it as the local url and give it higher affinity than the other urls.
+ */
+
+ int indexOfLocalUrl = -1;
+ for (int i = 0; i < urls.length; i++) {
+ if (urls[i].toLowerCase().startsWith("https://localhost:") || urls[i]
+ .toLowerCase().startsWith("http://localhost:")) {
+ indexOfLocalUrl = i;
+ }
+ }
+
+ boolean requiresNewAuth = false;
+ for (int retry = 0, index = (indexOfLocalUrl != -1)
+ ? indexOfLocalUrl
+ : random
+ .nextInt(urls.length);; retry++, index++) {
if (index >= urls.length) {
index = index % urls.length;
}
-
+ /**
+ * If the first request to localhost fails, randomly pick the next url
+ * from the remaining urls in the list so that load stays balanced.
+ */
+ if (indexOfLocalUrl != -1 && retry == 1) {
+ index = (index + random.nextInt(urls.length)) % urls.length;
+ if (index == indexOfLocalUrl) {
+ index = (index + 1) % urls.length;
+ }
+ }
try {
httpRequest =
- getHttpRequest(urls, path, queryParams, index, httpMethod);
-
+ getHttpRequest(urls, path, queryParams, index, httpMethod,
+ requiresNewAuth);
httpRequest.setHeader("Accept", APPLICATION_JSON);
response = client.execute(httpRequest);
StatusLine statusLine = response.getStatusLine();
if (statusLine == null
|| statusLine.getStatusCode() != HttpStatus.SC_OK) {
+ requiresNewAuth =
+ (statusLine == null)
+ || (statusLine.getStatusCode() == HttpStatus.SC_UNAUTHORIZED);
+
throw new WasbRemoteCallException(
httpRequest.getURI().toString() + ":" + ((statusLine != null)
? statusLine.toString()
: "NULL"));
+ } else {
+ requiresNewAuth = false;
}
Header contentTypeHeader = response.getFirstHeader("Content-Type");
@@ -200,11 +232,14 @@ public class WasbRemoteCallHelper {
}
protected HttpUriRequest getHttpRequest(String[] urls, String path,
- List<NameValuePair> queryParams, int urlIndex, String httpMethod)
- throws URISyntaxException, IOException {
+ List<NameValuePair> queryParams, int urlIndex, String httpMethod,
+ boolean requiresNewAuth) throws URISyntaxException, IOException {
URIBuilder uriBuilder = null;
uriBuilder =
new URIBuilder(urls[urlIndex]).setPath(path).setParameters(queryParams);
+ if (uriBuilder.getHost().equals("localhost")) {
+ uriBuilder.setHost(InetAddress.getLocalHost().getCanonicalHostName());
+ }
HttpUriRequest httpUriRequest = null;
switch (httpMethod) {
case HttpPut.METHOD_NAME:
@@ -246,7 +281,7 @@ public class WasbRemoteCallHelper {
Thread.sleep(a.delayMillis);
return;
}
- } catch(InterruptedIOException e) {
+ } catch (InterruptedIOException e) {
LOG.warn(e.getMessage(), e);
Thread.currentThread().interrupt();
return;
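Distilled from the retry loop above, the selection policy is: start with
a localhost URL when one is configured, and after a local failure fall
back to a random remote URL so retries stay load-balanced. A standalone
sketch (class and method names here are illustrative, not part of the
patch):

    import java.util.Random;

    final class UrlAffinity {
      private static final Random RANDOM = new Random();

      // First attempt prefers a co-located service to avoid a network hop.
      static int chooseStartIndex(String[] urls) {
        for (int i = 0; i < urls.length; i++) {
          String u = urls[i].toLowerCase();
          if (u.startsWith("http://localhost:")
              || u.startsWith("https://localhost:")) {
            return i;
          }
        }
        return RANDOM.nextInt(urls.length);
      }

      // After the local URL fails once, pick a random different index.
      static int chooseRetryIndex(String[] urls, int indexOfLocalUrl) {
        int next = RANDOM.nextInt(urls.length);
        return (next == indexOfLocalUrl) ? (next + 1) % urls.length : next;
      }
    }

Note also the requiresNewAuth flag threaded through getHttpRequest(): a
null status line or a 401 (SC_UNAUTHORIZED) marks the cached SPNEGO token
stale, so the next attempt performs a fresh handshake instead of
replaying it.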
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
index cacdfc5..fa63837 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/Constants.java
@@ -23,22 +23,27 @@ package org.apache.hadoop.fs.azure.security;
*/
public final class Constants {
- private Constants() {
- }
-
/**
* The configuration property to enable Kerberos support.
*/
- public static final String AZURE_KERBEROS_SUPPORT_PROPERTY_NAME = "fs.azure.enable.kerberos.support";
-
+ public static final String AZURE_KERBEROS_SUPPORT_PROPERTY_NAME =
+ "fs.azure.enable.kerberos.support";
+ /**
+ * The configuration property to enable SPNEGO token cache.
+ */
+ public static final String AZURE_ENABLE_SPNEGO_TOKEN_CACHE =
+ "fs.azure.enable.spnego.token.cache";
/**
* Parameter to be used for impersonation.
*/
public static final String DOAS_PARAM = "doas";
-
/**
* Error message for Authentication failures.
*/
- public static final String AUTHENTICATION_FAILED_ERROR_MESSAGE = "Authentication Failed ";
+ public static final String AUTHENTICATION_FAILED_ERROR_MESSAGE =
+ "Authentication Failed ";
+
+ private Constants() {
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java
index 1078f88..36381dc 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/RemoteWasbDelegationTokenManager.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -34,7 +34,7 @@ import java.io.IOException;
import java.util.Map;
/**
- * Class to manage delegation token operations by making rest call to remote service.
+ * Class to manage delegation token operations by making rest call to remote service.
*/
public class RemoteWasbDelegationTokenManager
implements WasbDelegationTokenManager {
@@ -64,24 +64,26 @@ public class RemoteWasbDelegationTokenManager
* Default for delegation token service http retry policy spec.
*/
private static final String DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
- "1000,3,10000,2";
+ "10,3,100,2";
private static final boolean
DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = true;
private static final Text WASB_DT_SERVICE_NAME = new Text("WASB_DT_SERVICE");
/**
- * Query parameter value for Getting delegation token http request
+ * Query parameter value for Getting delegation token http request
*/
private static final String GET_DELEGATION_TOKEN_OP = "GETDELEGATIONTOKEN";
/**
* Query parameter value for renewing delegation token http request
*/
- private static final String RENEW_DELEGATION_TOKEN_OP = "RENEWDELEGATIONTOKEN";
+ private static final String RENEW_DELEGATION_TOKEN_OP =
+ "RENEWDELEGATIONTOKEN";
/**
* Query parameter value for canceling the delegation token http request
*/
- private static final String CANCEL_DELEGATION_TOKEN_OP = "CANCELDELEGATIONTOKEN";
+ private static final String CANCEL_DELEGATION_TOKEN_OP =
+ "CANCELDELEGATIONTOKEN";
/**
* op parameter to represent the operation.
*/
@@ -100,6 +102,7 @@ public class RemoteWasbDelegationTokenManager
private static final String TOKEN_PARAM_KEY_NAME = "token";
private WasbRemoteCallHelper remoteCallHelper;
private String[] dtServiceUrls;
+ private boolean isSpnegoTokenCacheEnabled;
public RemoteWasbDelegationTokenManager(Configuration conf)
throws IOException {
@@ -108,8 +111,11 @@ public class RemoteWasbDelegationTokenManager
DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
DT_MANAGER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
+ this.isSpnegoTokenCacheEnabled =
+ conf.getBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
- remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, true);
+ remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, true,
+ isSpnegoTokenCacheEnabled);
this.dtServiceUrls =
conf.getTrimmedStrings(KEY_DELEGATION_TOKEN_SERVICE_URLS);
if (this.dtServiceUrls == null || this.dtServiceUrls.length <= 0) {
@@ -126,7 +132,8 @@ public class RemoteWasbDelegationTokenManager
new URIBuilder().setPath(DEFAULT_DELEGATION_TOKEN_MANAGER_ENDPOINT)
.addParameter(OP_PARAM_KEY_NAME, GET_DELEGATION_TOKEN_OP)
.addParameter(RENEWER_PARAM_KEY_NAME, renewer)
- .addParameter(SERVICE_PARAM_KEY_NAME, WASB_DT_SERVICE_NAME.toString());
+ .addParameter(SERVICE_PARAM_KEY_NAME,
+ WASB_DT_SERVICE_NAME.toString());
String responseBody = remoteCallHelper
.makeRemoteRequest(dtServiceUrls, uriBuilder.getPath(),
uriBuilder.getQueryParams(), HttpGet.METHOD_NAME);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SpnegoToken.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SpnegoToken.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SpnegoToken.java
new file mode 100644
index 0000000..fba4e41
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/security/SpnegoToken.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure.security;
+
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+
+/**
+ * Class to represent SPNEGO token.
+ */
+public class SpnegoToken {
+ private AuthenticatedURL.Token token;
+ private long expiryTime;
+ private static final long TOKEN_VALIDITY_TIME_IN_MS = 60 * 60 * 1000L;
+
+ public SpnegoToken(AuthenticatedURL.Token token) {
+ this.token = token;
+ // Set the expiry time of the cached token to 60 minutes; the actual
+ // token remains valid for several hours but is treated as opaque here.
+ this.expiryTime = System.currentTimeMillis() + TOKEN_VALIDITY_TIME_IN_MS;
+ }
+
+ public AuthenticatedURL.Token getToken() {
+ return token;
+ }
+
+ public long getExpiryTime() {
+ return expiryTime;
+ }
+
+ public boolean isTokenValid() {
+ return (expiryTime >= System.currentTimeMillis());
+ }
+}
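The cache is consulted in SecureWasbRemoteCallHelper#getHttpRequest()
above: a valid cached token is reused unless requiresNewAuth was set by a
prior 401. Enabling or disabling it is a one-line configuration change; a
minimal sketch using the constant introduced in this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.azure.security.Constants;

    public class SpnegoCacheConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Defaults to true; when enabled, each helper reuses its SPNEGO
        // token for up to TOKEN_VALIDITY_TIME_IN_MS (60 minutes) rather
        // than performing a handshake per request.
        conf.setBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
        System.out.println("cache enabled: " + conf.getBoolean(
            Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true));
      }
    }

The fixed 60-minute expiry is deliberately conservative: the real token
lives longer, but since it is treated as opaque there is no way to read
its actual lifetime.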
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
index f459b24..efda15d 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java
@@ -43,6 +43,8 @@ import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
import java.io.ByteArrayInputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE;
@@ -62,7 +64,7 @@ public class TestWasbRemoteCallHelper
protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
Configuration conf = new Configuration();
conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true");
- conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/");
+ conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/,http://localhost:8080");
return AzureBlobStorageTestAccount.create(conf);
}
@@ -304,6 +306,18 @@ public class TestWasbRemoteCallHelper
Mockito.when(mockHttpResponseService2.getEntity())
.thenReturn(mockHttpEntity);
+ HttpResponse mockHttpResponseServiceLocal = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseServiceLocal.getStatusLine())
+ .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseServiceLocal.getEntity())
+ .thenReturn(mockHttpEntity);
+
+
+
class HttpGetForService1 extends ArgumentMatcher<HttpGet>{
@Override public boolean matches(Object o) {
return checkHttpGetMatchHost((HttpGet) o, "localhost1");
@@ -314,10 +328,21 @@ public class TestWasbRemoteCallHelper
return checkHttpGetMatchHost((HttpGet) o, "localhost2");
}
}
+ class HttpGetForServiceLocal extends ArgumentMatcher<HttpGet>{
+ @Override public boolean matches(Object o) {
+ try {
+ return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
+ } catch (UnknownHostException e) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost");
+ }
+ }
+ }
Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
.thenReturn(mockHttpResponseService1);
Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
.thenReturn(mockHttpResponseService2);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForServiceLocal())))
+ .thenReturn(mockHttpResponseServiceLocal);
//Need 3 times because performop() does 3 fs operations.
Mockito.when(mockHttpEntity.getContent())
@@ -331,6 +356,7 @@ public class TestWasbRemoteCallHelper
performop(mockHttpClient);
+ Mockito.verify(mockHttpClient, times(3)).execute(Mockito.argThat(new HttpGetForServiceLocal()));
Mockito.verify(mockHttpClient, times(3)).execute(Mockito.argThat(new HttpGetForService2()));
}
@@ -362,6 +388,17 @@ public class TestWasbRemoteCallHelper
Mockito.when(mockHttpResponseService2.getEntity())
.thenReturn(mockHttpEntity);
+ HttpResponse mockHttpResponseService3 = Mockito.mock(HttpResponse.class);
+ Mockito.when(mockHttpResponseService3.getStatusLine())
+ .thenReturn(newStatusLine(
+ HttpStatus.SC_INTERNAL_SERVER_ERROR));
+ Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Type"))
+ .thenReturn(newHeader("Content-Type", "application/json"));
+ Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Length"))
+ .thenReturn(newHeader("Content-Length", "1024"));
+ Mockito.when(mockHttpResponseService3.getEntity())
+ .thenReturn(mockHttpEntity);
+
class HttpGetForService1 extends ArgumentMatcher<HttpGet>{
@Override public boolean matches(Object o) {
return checkHttpGetMatchHost((HttpGet) o, "localhost1");
@@ -372,10 +409,21 @@ public class TestWasbRemoteCallHelper
return checkHttpGetMatchHost((HttpGet) o, "localhost2");
}
}
+ class HttpGetForService3 extends ArgumentMatcher<HttpGet> {
+ @Override public boolean matches(Object o){
+ try {
+ return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName());
+ } catch (UnknownHostException e) {
+ return checkHttpGetMatchHost((HttpGet) o, "localhost");
+ }
+ }
+ }
Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1())))
.thenReturn(mockHttpResponseService1);
Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2())))
.thenReturn(mockHttpResponseService2);
+ Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService3())))
+ .thenReturn(mockHttpResponseService3);
//Need 3 times because performop() does 3 fs operations.
Mockito.when(mockHttpEntity.getContent())
@@ -390,10 +438,12 @@ public class TestWasbRemoteCallHelper
performop(mockHttpClient);
}catch (WasbAuthorizationException e){
e.printStackTrace();
- Mockito.verify(mockHttpClient, atLeast(3))
+ Mockito.verify(mockHttpClient, atLeast(2))
.execute(argThat(new HttpGetForService1()));
- Mockito.verify(mockHttpClient, atLeast(3))
+ Mockito.verify(mockHttpClient, atLeast(2))
.execute(argThat(new HttpGetForService2()));
+ Mockito.verify(mockHttpClient, atLeast(3))
+ .execute(argThat(new HttpGetForService3()));
Mockito.verify(mockHttpClient, times(7)).execute(Mockito.<HttpGet>any());
}
}
@@ -425,7 +475,7 @@ public class TestWasbRemoteCallHelper
expectedEx.expectMessage(new MatchesPattern(
"org\\.apache\\.hadoop\\.fs\\.azure\\.WasbRemoteCallException: "
+ "Encountered error while making remote call to "
- + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/ retried 6 time\\(s\\)\\."));
+ + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/,http:\\/\\/localhost:8080 retried 6 time\\(s\\)\\."));
}
private void performop(HttpClient mockHttpClient) throws Throwable {
[10/50] [abbrv] hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe
YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebc048cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebc048cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebc048cc
Branch: refs/heads/HDFS-7240
Commit: ebc048cc055d0f7d1b85bc0b6f56cd15673e837d
Parents: 0ffca5d
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Thu Jul 13 17:44:47 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Thu Jul 13 17:44:47 2017 -0500
----------------------------------------------------------------------
.../nodemanager/LinuxContainerExecutor.java | 19 +++--
.../PrivilegedOperationException.java | 10 +--
.../runtime/ContainerExecutionException.java | 10 +--
.../TestLinuxContainerExecutorWithMocks.java | 89 ++++++++++++++++++++
4 files changed, 111 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebc048cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 9a3b2d2..2aaa835 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,6 +275,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
}
+ protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+ return PrivilegedOperationExecutor.getInstance(getConf());
+ }
+
@Override
public void init() throws IOException {
Configuration conf = super.getConf();
@@ -285,7 +289,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
PrivilegedOperation checkSetupOp = new PrivilegedOperation(
PrivilegedOperation.OperationType.CHECK_SETUP);
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
false);
@@ -382,7 +386,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
Configuration conf = super.getConf();
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
initializeContainerOp, null, null, false, true);
@@ -530,8 +534,9 @@ public class LinuxContainerExecutor extends ContainerExecutor {
}
builder.append("Stack trace: "
+ StringUtils.stringifyException(e) + "\n");
- if (!e.getOutput().isEmpty()) {
- builder.append("Shell output: " + e.getOutput() + "\n");
+ String output = e.getOutput();
+ if (output != null && !e.getOutput().isEmpty()) {
+ builder.append("Shell output: " + output + "\n");
}
String diagnostics = builder.toString();
logOutput(diagnostics);
@@ -729,7 +734,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
Configuration conf = super.getConf();
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
false);
@@ -759,7 +764,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
try {
PrivilegedOperationExecutor privOpExecutor =
- PrivilegedOperationExecutor.getInstance(super.getConf());
+ getPrivilegedOperationExecutor();
String results =
privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -818,7 +823,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
mountCGroupsOp.appendArgs(cgroupKVs);
PrivilegedOperationExecutor privilegedOperationExecutor =
- PrivilegedOperationExecutor.getInstance(conf);
+ getPrivilegedOperationExecutor();
privilegedOperationExecutor.executePrivilegedOperation(mountCGroupsOp,
false);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebc048cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
index 3622489..9a11194 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationException.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
public class PrivilegedOperationException extends YarnException {
private static final long serialVersionUID = 1L;
- private Integer exitCode;
+ private int exitCode = -1;
private String output;
private String errorOutput;
@@ -36,7 +36,7 @@ public class PrivilegedOperationException extends YarnException {
super(message);
}
- public PrivilegedOperationException(String message, Integer exitCode,
+ public PrivilegedOperationException(String message, int exitCode,
String output, String errorOutput) {
super(message);
this.exitCode = exitCode;
@@ -48,8 +48,8 @@ public class PrivilegedOperationException extends YarnException {
super(cause);
}
- public PrivilegedOperationException(Throwable cause, Integer exitCode, String
- output, String errorOutput) {
+ public PrivilegedOperationException(Throwable cause, int exitCode,
+ String output, String errorOutput) {
super(cause);
this.exitCode = exitCode;
this.output = output;
@@ -59,7 +59,7 @@ public class PrivilegedOperationException extends YarnException {
super(message, cause);
}
- public Integer getExitCode() {
+ public int getExitCode() {
return exitCode;
}
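The underlying bug is an auto-unboxing NPE: constructors that took only a
message or cause left the Integer exitCode field null, and any caller
assigning or comparing it as an int would throw. A minimal reproduction,
independent of the YARN classes:

    public class UnboxNpe {
      static Integer exitCode;   // never assigned, as in the old exception

      public static void main(String[] args) {
        try {
          int rc = exitCode;     // unboxing null throws NullPointerException
          System.out.println(rc);
        } catch (NullPointerException e) {
          System.out.println("NPE from unboxing a null Integer");
        }
      }
    }

Switching the field to a primitive int with a -1 default removes the null
state entirely, which is why both exception classes change in the same
way.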
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebc048cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
index 1fbece2..3147277 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
@@ -32,10 +32,10 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
@InterfaceStability.Unstable
public class ContainerExecutionException extends YarnException {
private static final long serialVersionUID = 1L;
- private static final Integer EXIT_CODE_UNSET = -1;
+ private static final int EXIT_CODE_UNSET = -1;
private static final String OUTPUT_UNSET = "<unknown>";
- private Integer exitCode;
+ private int exitCode;
private String output;
private String errorOutput;
@@ -54,7 +54,7 @@ public class ContainerExecutionException extends YarnException {
}
- public ContainerExecutionException(String message, Integer exitCode, String
+ public ContainerExecutionException(String message, int exitCode, String
output, String errorOutput) {
super(message);
this.exitCode = exitCode;
@@ -62,7 +62,7 @@ public class ContainerExecutionException extends YarnException {
this.errorOutput = errorOutput;
}
- public ContainerExecutionException(Throwable cause, Integer exitCode, String
+ public ContainerExecutionException(Throwable cause, int exitCode, String
output, String errorOutput) {
super(cause);
this.exitCode = exitCode;
@@ -70,7 +70,7 @@ public class ContainerExecutionException extends YarnException {
this.errorOutput = errorOutput;
}
- public Integer getExitCode() {
+ public int getExitCode() {
return exitCode;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebc048cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index 07134e8..cfd0e36 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -23,7 +23,9 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
@@ -40,6 +42,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -47,6 +50,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -54,6 +59,7 @@ import org.apache.hadoop.yarn.exceptions.ConfigurationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DefaultLinuxContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
@@ -516,4 +522,87 @@ public class TestLinuxContainerExecutorWithMocks {
appSubmitter, cmd, "", baseDir0.toString(), baseDir1.toString()),
readMockParams());
}
+
+ @Test
+ public void testNoExitCodeFromPrivilegedOperation() throws Exception {
+ Configuration conf = new Configuration();
+ final PrivilegedOperationExecutor spyPrivilegedExecutor =
+ spy(PrivilegedOperationExecutor.getInstance(conf));
+ doThrow(new PrivilegedOperationException("interrupted"))
+ .when(spyPrivilegedExecutor).executePrivilegedOperation(
+ any(List.class), any(PrivilegedOperation.class),
+ any(File.class), any(Map.class), anyBoolean(), anyBoolean());
+ LinuxContainerRuntime runtime = new DefaultLinuxContainerRuntime(
+ spyPrivilegedExecutor);
+ runtime.initialize(conf);
+ mockExec = new LinuxContainerExecutor(runtime);
+ mockExec.setConf(conf);
+ LinuxContainerExecutor lce = new LinuxContainerExecutor(runtime) {
+ @Override
+ protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+ return spyPrivilegedExecutor;
+ }
+ };
+ lce.setConf(conf);
+ InetSocketAddress address = InetSocketAddress.createUnresolved(
+ "localhost", 8040);
+ Path nmPrivateCTokensPath= new Path("file:///bin/nmPrivateCTokensPath");
+ LocalDirsHandlerService dirService = new LocalDirsHandlerService();
+ dirService.init(conf);
+
+ String appSubmitter = "nobody";
+ ApplicationId appId = ApplicationId.newInstance(1, 1);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+ ContainerId cid = ContainerId.newContainerId(attemptId, 1);
+ HashMap<String, String> env = new HashMap<>();
+ Container container = mock(Container.class);
+ ContainerLaunchContext context = mock(ContainerLaunchContext.class);
+ when(container.getContainerId()).thenReturn(cid);
+ when(container.getLaunchContext()).thenReturn(context);
+ when(context.getEnvironment()).thenReturn(env);
+ Path workDir = new Path("/tmp");
+
+ try {
+ lce.startLocalizer(new LocalizerStartContext.Builder()
+ .setNmPrivateContainerTokens(nmPrivateCTokensPath)
+ .setNmAddr(address)
+ .setUser(appSubmitter)
+ .setAppId(appId.toString())
+ .setLocId("12345")
+ .setDirsHandler(dirService)
+ .build());
+ Assert.fail("startLocalizer should have thrown an exception");
+ } catch (IOException e) {
+ assertTrue("Unexpected exception " + e,
+ e.getMessage().contains("exitCode"));
+ }
+
+ lce.activateContainer(cid, new Path(workDir, "pid.txt"));
+ lce.launchContainer(new ContainerStartContext.Builder()
+ .setContainer(container)
+ .setNmPrivateContainerScriptPath(new Path("file:///bin/echo"))
+ .setNmPrivateTokensPath(new Path("file:///dev/null"))
+ .setUser(appSubmitter)
+ .setAppId(appId.toString())
+ .setContainerWorkDir(workDir)
+ .setLocalDirs(dirsHandler.getLocalDirs())
+ .setLogDirs(dirsHandler.getLogDirs())
+ .setFilecacheDirs(new ArrayList<>())
+ .setUserLocalDirs(new ArrayList<>())
+ .setContainerLocalDirs(new ArrayList<>())
+ .setContainerLogDirs(new ArrayList<>())
+ .build());
+ lce.deleteAsUser(new DeletionAsUserContext.Builder()
+ .setUser(appSubmitter)
+ .setSubDir(new Path("/tmp/testdir"))
+ .build());
+
+ try {
+ lce.mountCgroups(new ArrayList<String>(), "hierarchy");
+ Assert.fail("mountCgroups should have thrown an exception");
+ } catch (IOException e) {
+ assertTrue("Unexpected exception " + e,
+ e.getMessage().contains("exit code"));
+ }
+ }
}
[05/50] [abbrv] hadoop git commit: HADOOP-14646. FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never runs. Contributed by Andras Bokor.
HADOOP-14646. FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never runs. Contributed by Andras Bokor.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b61ab857
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b61ab857
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b61ab857
Branch: refs/heads/HDFS-7240
Commit: b61ab8573eb2f224481118004f620fe9f18db74b
Parents: cf0d084
Author: Masatake Iwasaki <iw...@apache.org>
Authored: Thu Jul 13 21:41:43 2017 +0900
Committer: Masatake Iwasaki <iw...@apache.org>
Committed: Thu Jul 13 21:41:43 2017 +0900
----------------------------------------------------------------------
.../org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61ab857/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index a536e57..35ec4ff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -391,6 +391,7 @@ public abstract class FileContextMainOperationsBaseTest {
}
+ @Test
public void testListStatusFilterWithSomeMatches() throws Exception {
Path[] testDirs = {
getTestRootPath(fc, TEST_DIR_AAA),
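The one-line fix matters because JUnit 4 discovers tests by annotation,
not by the JUnit 3 test* naming convention, so the unannotated method was
silently skipped. A sketch of the difference:

    import static org.junit.Assert.assertTrue;
    import org.junit.Test;

    public class AnnotationMattersTest {
      // Silently skipped: JUnit 4 runners execute only @Test methods.
      public void testNeverRuns() {
        assertTrue(false);
      }

      @Test
      public void testRuns() {
        assertTrue(true);
      }
    }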
[47/50] [abbrv] hadoop git commit: YARN-6777. Support for ApplicationMasterService processing chain of interceptors. (asuresh)
YARN-6777. Support for ApplicationMasterService processing chain of interceptors. (asuresh)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/077fcf6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/077fcf6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/077fcf6a
Branch: refs/heads/HDFS-7240
Commit: 077fcf6a96e420e7f36350931722b8603d010cf1
Parents: 3556e36
Author: Arun Suresh <as...@apache.org>
Authored: Mon Jul 17 17:02:22 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jul 19 12:26:40 2017 -0700
----------------------------------------------------------------------
.../ams/ApplicationMasterServiceContext.java | 29 ++++
.../ams/ApplicationMasterServiceProcessor.java | 30 ++--
.../hadoop/yarn/conf/YarnConfiguration.java | 5 +-
.../src/main/resources/yarn-default.xml | 10 ++
.../resourcemanager/AMSProcessingChain.java | 102 ++++++++++++
.../ApplicationMasterService.java | 49 ++++--
.../resourcemanager/DefaultAMSProcessor.java | 69 ++++----
...pportunisticContainerAllocatorAMService.java | 67 +++++---
.../yarn/server/resourcemanager/RMContext.java | 3 +-
.../TestApplicationMasterService.java | 163 ++++++++++++++++++-
10 files changed, 446 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
new file mode 100644
index 0000000..988c727
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.ams;
+
+/**
+ * This is a marker interface for a context object that is injected into
+ * the ApplicationMasterService processor. A processor implementation
+ * is free to cast this to the concrete context implementation
+ * available on the classpath.
+ */
+public interface ApplicationMasterServiceContext {
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
index b426f48..b7d925a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
@@ -38,34 +38,44 @@ import java.io.IOException;
public interface ApplicationMasterServiceProcessor {
/**
+ * Initialize with an ApplicationMasterServiceContext as well as the
+ * next processor in the chain.
+ * @param amsContext AMSContext.
+ * @param nextProcessor next ApplicationMasterServiceProcessor
+ */
+ void init(ApplicationMasterServiceContext amsContext,
+ ApplicationMasterServiceProcessor nextProcessor);
+
+ /**
* Register AM attempt.
* @param applicationAttemptId applicationAttemptId.
* @param request Register Request.
- * @return Register Response.
+ * @param response Register Response.
* @throws IOException IOException.
*/
- RegisterApplicationMasterResponse registerApplicationMaster(
+ void registerApplicationMaster(
ApplicationAttemptId applicationAttemptId,
- RegisterApplicationMasterRequest request) throws IOException;
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response) throws IOException;
/**
* Allocate call.
* @param appAttemptId appAttemptId.
* @param request Allocate Request.
- * @return Allocate Response.
+ * @param response Allocate Response.
* @throws YarnException YarnException.
*/
- AllocateResponse allocate(ApplicationAttemptId appAttemptId,
- AllocateRequest request) throws YarnException;
+ void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request, AllocateResponse response) throws YarnException;
/**
* Finish AM.
* @param applicationAttemptId applicationAttemptId.
* @param request Finish AM Request.
- * @return Finish AM response.
+ * @param response Finish AM Response.
*/
- FinishApplicationMasterResponse finishApplicationMaster(
+ void finishApplicationMaster(
ApplicationAttemptId applicationAttemptId,
- FinishApplicationMasterRequest request);
-
+ FinishApplicationMasterRequest request,
+ FinishApplicationMasterResponse response);
}
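Note how the interface moved from return-value style to caller-supplied response objects; that is what lets a processor decorate the response both before and after delegating. A minimal pass-through processor against the new contract might look like this (an illustrative sketch; NoOpProcessor is a hypothetical name, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class NoOpProcessor implements ApplicationMasterServiceProcessor {

  private ApplicationMasterServiceProcessor next;

  @Override
  public void init(ApplicationMasterServiceContext amsContext,
      ApplicationMasterServiceProcessor nextProcessor) {
    // Remember the downstream processor so every call can be forwarded.
    this.next = nextProcessor;
  }

  @Override
  public void registerApplicationMaster(ApplicationAttemptId attemptId,
      RegisterApplicationMasterRequest request,
      RegisterApplicationMasterResponse response) throws IOException {
    // Pre-processing would go here, before delegation.
    next.registerApplicationMaster(attemptId, request, response);
    // Post-processing can mutate 'response' here, after delegation.
  }

  @Override
  public void allocate(ApplicationAttemptId attemptId,
      AllocateRequest request, AllocateResponse response)
      throws YarnException {
    next.allocate(attemptId, request, response);
  }

  @Override
  public void finishApplicationMaster(ApplicationAttemptId attemptId,
      FinishApplicationMasterRequest request,
      FinishApplicationMasterResponse response) {
    next.finishApplicationMaster(attemptId, request, response);
  }
}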
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 01eff64..93437e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -103,7 +103,7 @@ public class YarnConfiguration extends Configuration {
YarnConfiguration.NM_PREFIX + "log-container-debug-info.enabled";
public static final boolean DEFAULT_NM_LOG_CONTAINER_DEBUG_INFO = false;
-
+
////////////////////////////////
// IPC Configs
////////////////////////////////
@@ -150,6 +150,9 @@ public class YarnConfiguration extends Configuration {
public static final String DEFAULT_RM_ADDRESS =
"0.0.0.0:" + DEFAULT_RM_PORT;
+ public static final String RM_APPLICATION_MASTER_SERVICE_PROCESSORS =
+ RM_PREFIX + "application-master-service.processors";
+
/** The actual bind address for the RM.*/
public static final String RM_BIND_HOST =
RM_PREFIX + "bind-host";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0588c6c..7ddcfcd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -123,6 +123,16 @@
</property>
<property>
+ <description>
+ Comma separated class names of ApplicationMasterServiceProcessor
+ implementations. The processors will be applied in the order
+ they are specified.
+ </description>
+ <name>yarn.resourcemanager.application-master-service.processors</name>
+ <value></value>
+ </property>
+
+ <property>
<description>
This configures the HTTP endpoint for Yarn Daemons. The following
values are supported:
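Before moving on: the new processors property above would be set by an administrator in yarn-site.xml along these lines (the two class names are placeholders, not real classes):

<property>
  <name>yarn.resourcemanager.application-master-service.processors</name>
  <value>com.example.AuditAMSProcessor,com.example.NoOpProcessor</value>
</property>

DefaultAMSProcessor is always appended implicitly as the tail of the chain, so it never needs to be listed.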
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java
new file mode 100644
index 0000000..931b1c8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+import java.io.IOException;
+
+/**
+ * This maintains a chain of {@link ApplicationMasterServiceProcessor}s.
+ */
+class AMSProcessingChain implements ApplicationMasterServiceProcessor {
+
+ private static final Log LOG = LogFactory.getLog(AMSProcessingChain.class);
+
+ private ApplicationMasterServiceProcessor head;
+ private RMContext rmContext;
+
+ /**
+ * This has to be initialized with at least one Processor.
+ * @param rootProcessor Root processor.
+ */
+ AMSProcessingChain(ApplicationMasterServiceProcessor rootProcessor) {
+ if (rootProcessor == null) {
+ throw new YarnRuntimeException("No root ApplicationMasterService" +
+ "Processor specified for the processing chain.");
+ }
+ this.head = rootProcessor;
+ }
+
+ @Override
+ public void init(ApplicationMasterServiceContext amsContext,
+ ApplicationMasterServiceProcessor nextProcessor) {
+ LOG.info("Initializing AMS Processing chain. Root Processor=["
+ + this.head.getClass().getName() + "].");
+ this.rmContext = (RMContext)amsContext;
+ // The head is initialized with a null 'next' processor
+ this.head.init(amsContext, null);
+ }
+
+ /**
+ * Add a processor to the top of the chain.
+ * @param processor ApplicationMasterServiceProcessor
+ */
+ public synchronized void addProcessor(
+ ApplicationMasterServiceProcessor processor) {
+ LOG.info("Adding [" + processor.getClass().getName() + "] tp top of" +
+ " AMS Processing chain. ");
+ processor.init(this.rmContext, this.head);
+ this.head = processor;
+ }
+
+ @Override
+ public void registerApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse resp) throws IOException {
+ this.head.registerApplicationMaster(applicationAttemptId, request, resp);
+ }
+
+ @Override
+ public void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request, AllocateResponse response) throws YarnException {
+ this.head.allocate(appAttemptId, request, response);
+ }
+
+ @Override
+ public void finishApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ FinishApplicationMasterRequest request,
+ FinishApplicationMasterResponse response) {
+ this.head.finishApplicationMaster(applicationAttemptId, request, response);
+ }
+}
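A subtle point about ordering: addProcessor() pushes onto the head, so the processor added last handles each call first. ApplicationMasterService (next diff below) therefore reverses the configured list before adding, which is what makes processors run in the order they appear in the configuration. A rough trace, assuming two configured processors P1 and P2:

// config value "P1,P2"     ->  getProcessorList() returns [P1, P2]
// Collections.reverse(...) ->  [P2, P1]
// addProcessor(P2)         ->  chain is P2 -> DefaultAMSProcessor
// addProcessor(P1)         ->  chain is P1 -> P2 -> DefaultAMSProcessor
// result: P1, first in the configuration, sees every call first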
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index fe8b83c..76a1640 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
@@ -88,7 +89,7 @@ public class ApplicationMasterService extends AbstractService implements
private final ConcurrentMap<ApplicationAttemptId, AllocateResponseLock> responseMap =
new ConcurrentHashMap<ApplicationAttemptId, AllocateResponseLock>();
protected final RMContext rmContext;
- private final ApplicationMasterServiceProcessor amsProcessor;
+ private final AMSProcessingChain amsProcessingChain;
public ApplicationMasterService(RMContext rmContext,
YarnScheduler scheduler) {
@@ -101,11 +102,7 @@ public class ApplicationMasterService extends AbstractService implements
this.amLivelinessMonitor = rmContext.getAMLivelinessMonitor();
this.rScheduler = scheduler;
this.rmContext = rmContext;
- this.amsProcessor = createProcessor();
- }
-
- protected ApplicationMasterServiceProcessor createProcessor() {
- return new DefaultAMSProcessor(rmContext, rScheduler);
+ this.amsProcessingChain = new AMSProcessingChain(new DefaultAMSProcessor());
}
@Override
@@ -115,6 +112,21 @@ public class ApplicationMasterService extends AbstractService implements
YarnConfiguration.RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
+ amsProcessingChain.init(rmContext, null);
+ List<ApplicationMasterServiceProcessor> processors = getProcessorList(conf);
+ if (processors != null) {
+ Collections.reverse(processors);
+ for (ApplicationMasterServiceProcessor p : processors) {
+ this.amsProcessingChain.addProcessor(p);
+ }
+ }
+ }
+
+ protected List<ApplicationMasterServiceProcessor> getProcessorList(
+ Configuration conf) {
+ return conf.getInstances(
+ YarnConfiguration.RM_APPLICATION_MASTER_SERVICE_PROCESSORS,
+ ApplicationMasterServiceProcessor.class);
}
@Override
@@ -165,6 +177,10 @@ public class ApplicationMasterService extends AbstractService implements
YarnConfiguration.DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT));
}
+ protected AMSProcessingChain getProcessingChain() {
+ return this.amsProcessingChain;
+ }
+
@Private
public InetSocketAddress getBindAddress() {
return this.masterServiceAddress;
@@ -214,8 +230,12 @@ public class ApplicationMasterService extends AbstractService implements
lastResponse.setResponseId(0);
lock.setAllocateResponse(lastResponse);
- return this.amsProcessor.registerApplicationMaster(
- amrmTokenIdentifier.getApplicationAttemptId(), request);
+ RegisterApplicationMasterResponse response =
+ recordFactory.newRecordInstance(
+ RegisterApplicationMasterResponse.class);
+ this.amsProcessingChain.registerApplicationMaster(
+ amrmTokenIdentifier.getApplicationAttemptId(), request, response);
+ return response;
}
}
@@ -265,8 +285,11 @@ public class ApplicationMasterService extends AbstractService implements
}
this.amLivelinessMonitor.receivedPing(applicationAttemptId);
- return this.amsProcessor.finishApplicationMaster(
- applicationAttemptId, request);
+ FinishApplicationMasterResponse response =
+ FinishApplicationMasterResponse.newInstance(false);
+ this.amsProcessingChain.finishApplicationMaster(
+ applicationAttemptId, request, response);
+ return response;
}
}
@@ -346,8 +369,10 @@ public class ApplicationMasterService extends AbstractService implements
throw new InvalidApplicationMasterRequestException(message);
}
- AllocateResponse response = this.amsProcessor.allocate(
- amrmTokenIdentifier.getApplicationAttemptId(), request);
+ AllocateResponse response =
+ recordFactory.newRecordInstance(AllocateResponse.class);
+ this.amsProcessingChain.allocate(
+ amrmTokenIdentifier.getApplicationAttemptId(), request, response);
// update AMRMToken if the token is rolled-up
MasterKeyData nextMasterKey =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 6eb1fba..052ec22 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -81,7 +82,11 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
-class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
+/**
+ * This is the default Application Master Service processor. It has to be
+ * the last processor in the {@link AMSProcessingChain}.
+ */
+final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
private static final Log LOG = LogFactory.getLog(DefaultAMSProcessor.class);
@@ -93,17 +98,19 @@ class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
- private final RMContext rmContext;
- private final YarnScheduler scheduler;
+ private RMContext rmContext;
- DefaultAMSProcessor(RMContext rmContext, YarnScheduler scheduler) {
- this.rmContext = rmContext;
- this.scheduler = scheduler;
+ @Override
+ public void init(ApplicationMasterServiceContext amsContext,
+ ApplicationMasterServiceProcessor nextProcessor) {
+ this.rmContext = (RMContext)amsContext;
}
- public RegisterApplicationMasterResponse registerApplicationMaster(
+ @Override
+ public void registerApplicationMaster(
ApplicationAttemptId applicationAttemptId,
- RegisterApplicationMasterRequest request) throws IOException {
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response) throws IOException {
RMApp app = getRmContext().getRMApps().get(
applicationAttemptId.getApplicationId());
@@ -116,8 +123,6 @@ class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
RMAuditLogger.AuditConstants.REGISTER_AM,
"ApplicationMasterService", app.getApplicationId(),
applicationAttemptId);
- RegisterApplicationMasterResponse response = recordFactory
- .newRecordInstance(RegisterApplicationMasterResponse.class);
response.setMaximumResourceCapability(getScheduler()
.getMaximumResourceCapability(app.getQueue()));
response.setApplicationACLs(app.getRMAppAttempt(applicationAttemptId)
@@ -165,11 +170,11 @@ class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
response.setSchedulerResourceTypes(getScheduler()
.getSchedulingResourceTypes());
- return response;
}
- public AllocateResponse allocate(ApplicationAttemptId appAttemptId,
- AllocateRequest request) throws YarnException {
+ @Override
+ public void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request, AllocateResponse response) throws YarnException {
handleProgress(appAttemptId, request);
@@ -259,50 +264,46 @@ class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
"blacklistRemovals: " + blacklistRemovals);
}
RMAppAttempt appAttempt = app.getRMAppAttempt(appAttemptId);
- AllocateResponse allocateResponse =
- recordFactory.newRecordInstance(AllocateResponse.class);
if (allocation.getNMTokens() != null &&
!allocation.getNMTokens().isEmpty()) {
- allocateResponse.setNMTokens(allocation.getNMTokens());
+ response.setNMTokens(allocation.getNMTokens());
}
// Notify the AM of container update errors
ApplicationMasterServiceUtils.addToUpdateContainerErrors(
- allocateResponse, updateErrors);
+ response, updateErrors);
// update the response with the deltas of node status changes
- handleNodeUpdates(app, allocateResponse);
+ handleNodeUpdates(app, response);
ApplicationMasterServiceUtils.addToAllocatedContainers(
- allocateResponse, allocation.getContainers());
+ response, allocation.getContainers());
- allocateResponse.setCompletedContainersStatuses(appAttempt
+ response.setCompletedContainersStatuses(appAttempt
.pullJustFinishedContainers());
- allocateResponse.setAvailableResources(allocation.getResourceLimit());
+ response.setAvailableResources(allocation.getResourceLimit());
- addToContainerUpdates(allocateResponse, allocation,
+ addToContainerUpdates(response, allocation,
((AbstractYarnScheduler)getScheduler())
.getApplicationAttempt(appAttemptId).pullUpdateContainerErrors());
- allocateResponse.setNumClusterNodes(getScheduler().getNumClusterNodes());
+ response.setNumClusterNodes(getScheduler().getNumClusterNodes());
// add collector address for this application
if (YarnConfiguration.timelineServiceV2Enabled(
getRmContext().getYarnConfiguration())) {
- allocateResponse.setCollectorAddr(
+ response.setCollectorAddr(
getRmContext().getRMApps().get(appAttemptId.getApplicationId())
.getCollectorAddr());
}
// add preemption to the allocateResponse message (if any)
- allocateResponse
- .setPreemptionMessage(generatePreemptionMessage(allocation));
+ response.setPreemptionMessage(generatePreemptionMessage(allocation));
// Set application priority
- allocateResponse.setApplicationPriority(app
+ response.setApplicationPriority(app
.getApplicationPriority());
- return allocateResponse;
}
private void handleNodeUpdates(RMApp app, AllocateResponse allocateResponse) {
@@ -351,20 +352,20 @@ class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
.getProgress()));
}
- public FinishApplicationMasterResponse finishApplicationMaster(
+ @Override
+ public void finishApplicationMaster(
ApplicationAttemptId applicationAttemptId,
- FinishApplicationMasterRequest request) {
+ FinishApplicationMasterRequest request,
+ FinishApplicationMasterResponse response) {
RMApp app =
getRmContext().getRMApps().get(applicationAttemptId.getApplicationId());
// For UnmanagedAMs, return true so they don't retry
- FinishApplicationMasterResponse response =
- FinishApplicationMasterResponse.newInstance(
+ response.setIsUnregistered(
app.getApplicationSubmissionContext().getUnmanagedAM());
getRmContext().getDispatcher().getEventHandler().handle(
new RMAppAttemptUnregistrationEvent(applicationAttemptId, request
.getTrackingUrl(), request.getFinalApplicationStatus(), request
.getDiagnostics()));
- return response;
}
private PreemptionMessage generatePreemptionMessage(Allocation allocation){
@@ -424,7 +425,7 @@ class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
}
protected YarnScheduler getScheduler() {
- return scheduler;
+ return rmContext.getScheduler();
}
private static void addToContainerUpdates(AllocateResponse allocateResponse,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index e03d944..3c278de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -23,9 +23,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
+import org.apache.hadoop.yarn.api.protocolrecords
+ .FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeId;
@@ -101,17 +105,29 @@ public class OpportunisticContainerAllocatorAMService
private volatile List<RemoteNode> cachedNodes;
private volatile long lastCacheUpdateTime;
- class OpportunisticAMSProcessor extends DefaultAMSProcessor {
+ class OpportunisticAMSProcessor implements
+ ApplicationMasterServiceProcessor {
- OpportunisticAMSProcessor(RMContext rmContext, YarnScheduler
- scheduler) {
- super(rmContext, scheduler);
+ private ApplicationMasterServiceContext context;
+ private ApplicationMasterServiceProcessor nextProcessor;
+
+ private YarnScheduler getScheduler() {
+ return ((RMContext)context).getScheduler();
}
@Override
- public RegisterApplicationMasterResponse registerApplicationMaster(
+ public void init(ApplicationMasterServiceContext amsContext,
+ ApplicationMasterServiceProcessor next) {
+ this.context = amsContext;
+ // The AMSProcessingChain guarantees that 'next' is not null.
+ this.nextProcessor = next;
+ }
+
+ @Override
+ public void registerApplicationMaster(
ApplicationAttemptId applicationAttemptId,
- RegisterApplicationMasterRequest request) throws IOException {
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response) throws IOException {
SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler)
getScheduler()).getApplicationAttempt(applicationAttemptId);
if (appAttempt.getOpportunisticContainerContext() == null) {
@@ -135,12 +151,14 @@ public class OpportunisticContainerAllocatorAMService
tokenExpiryInterval);
appAttempt.setOpportunisticContainerContext(opCtx);
}
- return super.registerApplicationMaster(applicationAttemptId, request);
+ nextProcessor.registerApplicationMaster(
+ applicationAttemptId, request, response);
}
@Override
- public AllocateResponse allocate(ApplicationAttemptId appAttemptId,
- AllocateRequest request) throws YarnException {
+ public void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request, AllocateResponse response)
+ throws YarnException {
// Partition requests to GUARANTEED and OPPORTUNISTIC.
OpportunisticContainerAllocator.PartitionedResourceRequests
partitionedAsks =
@@ -165,17 +183,22 @@ public class OpportunisticContainerAllocatorAMService
if (!oppContainers.isEmpty()) {
handleNewContainers(oppContainers, false);
appAttempt.updateNMTokens(oppContainers);
+ ApplicationMasterServiceUtils.addToAllocatedContainers(
+ response, oppContainers);
}
// Allocate GUARANTEED containers.
request.setAskList(partitionedAsks.getGuaranteed());
+ nextProcessor.allocate(appAttemptId, request, response);
+ }
- AllocateResponse response = super.allocate(appAttemptId, request);
- if (!oppContainers.isEmpty()) {
- ApplicationMasterServiceUtils.addToAllocatedContainers(
- response, oppContainers);
- }
- return response;
+ @Override
+ public void finishApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ FinishApplicationMasterRequest request,
+ FinishApplicationMasterResponse response) {
+ nextProcessor.finishApplicationMaster(applicationAttemptId,
+ request, response);
}
}
@@ -237,11 +260,6 @@ public class OpportunisticContainerAllocatorAMService
}
@Override
- protected ApplicationMasterServiceProcessor createProcessor() {
- return new OpportunisticAMSProcessor(rmContext, rmContext.getScheduler());
- }
-
- @Override
public Server getServer(YarnRPC rpc, Configuration serverConf,
InetSocketAddress addr, AMRMTokenSecretManager secretManager) {
if (YarnConfiguration.isDistSchedulingEnabled(serverConf)) {
@@ -262,6 +280,15 @@ public class OpportunisticContainerAllocatorAMService
}
@Override
+ protected List<ApplicationMasterServiceProcessor> getProcessorList(
+ Configuration conf) {
+ List<ApplicationMasterServiceProcessor> retVal =
+ super.getProcessorList(conf);
+ retVal.add(new OpportunisticAMSProcessor());
+ return retVal;
+ }
+
+ @Override
public RegisterDistributedSchedulingAMResponse
registerApplicationMasterForDistributedScheduling(
RegisterApplicationMasterRequest request) throws YarnException,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index ba6b915..0ea9516 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -23,6 +23,7 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
@@ -53,7 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineC
/**
* Context of the ResourceManager.
*/
-public interface RMContext {
+public interface RMContext extends ApplicationMasterServiceContext {
Dispatcher getDispatcher();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 18c49bd..85a36e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -20,20 +20,29 @@ package org.apache.hadoop.yarn.server.resourcemanager;
import static java.lang.Thread.sleep;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords
+ .RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
@@ -44,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -61,7 +71,7 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
import org.junit.Test;
public class TestApplicationMasterService {
@@ -71,13 +81,160 @@ public class TestApplicationMasterService {
private final int GB = 1024;
private static YarnConfiguration conf;
- @BeforeClass
- public static void setup() {
+ private static AtomicInteger beforeRegCount = new AtomicInteger(0);
+ private static AtomicInteger afterRegCount = new AtomicInteger(0);
+ private static AtomicInteger beforeAllocCount = new AtomicInteger(0);
+ private static AtomicInteger afterAllocCount = new AtomicInteger(0);
+ private static AtomicInteger beforeFinishCount = new AtomicInteger(0);
+ private static AtomicInteger afterFinishCount = new AtomicInteger(0);
+ private static AtomicInteger initCount = new AtomicInteger(0);
+
+ static class TestInterceptor1 implements
+ ApplicationMasterServiceProcessor {
+
+ private ApplicationMasterServiceProcessor nextProcessor;
+
+ @Override
+ public void init(ApplicationMasterServiceContext amsContext,
+ ApplicationMasterServiceProcessor next) {
+ initCount.incrementAndGet();
+ this.nextProcessor = next;
+ }
+
+ @Override
+ public void registerApplicationMaster(ApplicationAttemptId
+ applicationAttemptId, RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response) throws IOException {
+ nextProcessor.registerApplicationMaster(
+ applicationAttemptId, request, response);
+ }
+
+ @Override
+ public void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request,
+ AllocateResponse response) throws YarnException {
+ beforeAllocCount.incrementAndGet();
+ nextProcessor.allocate(appAttemptId, request, response);
+ afterAllocCount.incrementAndGet();
+ }
+
+ @Override
+ public void finishApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ FinishApplicationMasterRequest request,
+ FinishApplicationMasterResponse response) {
+ beforeFinishCount.incrementAndGet();
+ afterFinishCount.incrementAndGet();
+ }
+ }
+
+ static class TestInterceptor2 implements
+ ApplicationMasterServiceProcessor {
+
+ private ApplicationMasterServiceProcessor nextProcessor;
+
+ @Override
+ public void init(ApplicationMasterServiceContext amsContext,
+ ApplicationMasterServiceProcessor next) {
+ initCount.incrementAndGet();
+ this.nextProcessor = next;
+ }
+
+ @Override
+ public void registerApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response) throws IOException {
+ beforeRegCount.incrementAndGet();
+ nextProcessor.registerApplicationMaster(applicationAttemptId,
+ request, response);
+ afterRegCount.incrementAndGet();
+ }
+
+ @Override
+ public void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request, AllocateResponse response)
+ throws YarnException {
+ beforeAllocCount.incrementAndGet();
+ nextProcessor.allocate(appAttemptId, request, response);
+ afterAllocCount.incrementAndGet();
+ }
+
+ @Override
+ public void finishApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ FinishApplicationMasterRequest request,
+ FinishApplicationMasterResponse response) {
+ beforeFinishCount.incrementAndGet();
+ nextProcessor.finishApplicationMaster(
+ applicationAttemptId, request, response);
+ afterFinishCount.incrementAndGet();
+ }
+ }
+
+ @Before
+ public void setup() {
conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
ResourceScheduler.class);
}
+ @Test(timeout = 300000)
+ public void testApplicationMasterInterceptor() throws Exception {
+ conf.set(YarnConfiguration.RM_APPLICATION_MASTER_SERVICE_PROCESSORS,
+ TestInterceptor1.class.getName() + ","
+ + TestInterceptor2.class.getName());
+ MockRM rm = new MockRM(conf);
+ rm.start();
+
+ // Register node1
+ MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
+
+ // Submit an application
+ RMApp app1 = rm.submitApp(2048);
+
+ // kick the scheduling
+ nm1.nodeHeartbeat(true);
+ RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+ MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+ am1.registerAppAttempt();
+ int allocCount = 0;
+
+ am1.addRequests(new String[] {"127.0.0.1"}, GB, 1, 1);
+ AllocateResponse alloc1Response = am1.schedule(); // send the request
+ allocCount++;
+
+ // kick the scheduler
+ nm1.nodeHeartbeat(true);
+ while (alloc1Response.getAllocatedContainers().size() < 1) {
+ LOG.info("Waiting for containers to be created for app 1...");
+ sleep(1000);
+ alloc1Response = am1.schedule();
+ allocCount++;
+ }
+
+ // assert RMIdentifier is set properly in allocated containers
+ Container allocatedContainer =
+ alloc1Response.getAllocatedContainers().get(0);
+ ContainerTokenIdentifier tokenId =
+ BuilderUtils.newContainerTokenIdentifier(allocatedContainer
+ .getContainerToken());
+ am1.unregisterAppAttempt();
+
+ Assert.assertEquals(1, beforeRegCount.get());
+ Assert.assertEquals(1, afterRegCount.get());
+
+ // Each allocate call passes through both interceptors, so the
+ // counters are incremented twice per call
+ Assert.assertEquals(allocCount * 2, beforeAllocCount.get());
+ Assert.assertEquals(allocCount * 2, afterAllocCount.get());
+
+ // Finish should only be called once, since TestInterceptor1 (first
+ // in the chain) does not forward the call to the next processor.
+ Assert.assertEquals(1, beforeFinishCount.get());
+ Assert.assertEquals(1, afterFinishCount.get());
+ rm.stop();
+ }
+
@Test(timeout = 3000000)
public void testRMIdentifierOnContainerAllocation() throws Exception {
MockRM rm = new MockRM(conf);
[07/50] [abbrv] hadoop git commit: YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst upgrade. Contributed by Jonathan Eagles
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f1ee72b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f1ee72b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f1ee72b
Branch: refs/heads/HDFS-7240
Commit: 5f1ee72b0ebf0330417b7c0115083bc851923be4
Parents: 945c095
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Thu Jul 13 17:27:40 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Thu Jul 13 17:27:40 2017 -0500
----------------------------------------------------------------------
.../timeline/RollingLevelDBTimelineStore.java | 50 ++++++++++++++++----
1 file changed, 41 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f1ee72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index d139346..00f6630 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -28,6 +28,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@@ -74,6 +75,7 @@ import org.iq80.leveldb.Options;
import org.iq80.leveldb.ReadOptions;
import org.iq80.leveldb.WriteBatch;
import org.nustaq.serialization.FSTConfiguration;
+import org.nustaq.serialization.FSTClazzNameRegistry;
import static java.nio.charset.StandardCharsets.UTF_8;
@@ -170,9 +172,22 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
.getLog(RollingLevelDBTimelineStore.class);
private static FSTConfiguration fstConf =
FSTConfiguration.createDefaultConfiguration();
+ // Fall back to 2.24 parsing if 2.50 parsing fails
+ private static FSTConfiguration fstConf224 =
+ FSTConfiguration.createDefaultConfiguration();
+ // Static class code for 2.24
+ private static final int LINKED_HASH_MAP_224_CODE = 83;
static {
fstConf.setShareReferences(false);
+ fstConf224.setShareReferences(false);
+ // YARN-6654: unable to find class for code 83 (LinkedHashMap).
+ // Between fst 2.24 and 2.50, LinkedHashMap's registration changed
+ // from the static class code 83 to a dynamically assigned code, so
+ // data written by 2.24 needs the explicit registration below.
+ FSTClazzNameRegistry registry = fstConf224.getClassRegistry();
+ registry.registerClass(
+ LinkedHashMap.class, LINKED_HASH_MAP_224_CODE, fstConf224);
}
@Private
@@ -339,7 +354,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
deletionThread.start();
}
super.serviceStart();
- }
+ }
@Override
protected void serviceStop() throws Exception {
@@ -365,7 +380,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
private final long ttl;
private final long ttlInterval;
- public EntityDeletionThread(Configuration conf) {
+ EntityDeletionThread(Configuration conf) {
ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
DEFAULT_TIMELINE_SERVICE_TTL_MS);
ttlInterval = conf.getLong(
@@ -479,9 +494,15 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
try {
o = fstConf.asObject(iterator.peekNext().getValue());
entity.addOtherInfo(keyStr, o);
- } catch (Exception e) {
- LOG.warn("Error while decoding "
- + entityId + ":otherInfo:" + keyStr, e);
+ } catch (Exception ignore) {
+ try {
+ // Fall back to 2.24 parser
+ o = fstConf224.asObject(iterator.peekNext().getValue());
+ entity.addOtherInfo(keyStr, o);
+ } catch (Exception e) {
+ LOG.warn("Error while decoding "
+ + entityId + ":otherInfo:" + keyStr, e);
+ }
}
}
} else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
@@ -1348,8 +1369,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
Object o = null;
try {
o = fstConf.asObject(value);
- } catch (Exception e) {
- LOG.warn("Error while decoding " + tstype, e);
+ } catch (Exception ignore) {
+ try {
+ // Fall back to 2.24 parser
+ o = fstConf224.asObject(value);
+ } catch (Exception e) {
+ LOG.warn("Error while decoding " + tstype, e);
+ }
}
if (o == null) {
event.setEventInfo(null);
@@ -1378,8 +1404,14 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
try {
value = fstConf.asObject(bytes);
entity.addPrimaryFilter(name, value);
- } catch (Exception e) {
- LOG.warn("Error while decoding " + name, e);
+ } catch (Exception ignore) {
+ try {
+ // Fall back to 2.24 parser
+ value = fstConf224.asObject(bytes);
+ entity.addPrimaryFilter(name, value);
+ } catch (Exception e) {
+ LOG.warn("Error while decoding " + name, e);
+ }
}
}
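The same two-step decode now appears at three call sites (other info, event info, and primary filters). As a sketch of how it could be factored into one method on RollingLevelDBTimelineStore — fstDecode is a hypothetical helper name, not part of this patch:

private static Object fstDecode(byte[] bytes, String what) {
  try {
    // Try the current (fst 2.50) wire format first.
    return fstConf.asObject(bytes);
  } catch (Exception ignore) {
    try {
      // Fall back to the 2.24 configuration, which has LinkedHashMap
      // registered under its old static class code (83).
      return fstConf224.asObject(bytes);
    } catch (Exception e) {
      LOG.warn("Error while decoding " + what, e);
      return null;
    }
  }
}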
[20/50] [abbrv] hadoop git commit: Revert "HDFS-12130. Optimizing permission check for getContentSummary." to fix commit message.
This reverts commit a29fe100b3c671954b759add5923a2b44af9e6a4.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1f12bb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1f12bb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1f12bb5
Branch: refs/heads/HDFS-7240
Commit: a1f12bb543778ddc243205eaa962e99da4d8f135
Parents: 9e0cde1
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Fri Jul 14 14:34:01 2017 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Fri Jul 14 14:34:01 2017 -0700
----------------------------------------------------------------------
.../server/blockmanagement/BlockCollection.java | 4 +-
.../ContentSummaryComputationContext.java | 20 --
.../namenode/DirectoryWithQuotaFeature.java | 4 +-
.../server/namenode/FSDirStatAndListingOp.java | 9 +-
.../server/namenode/FSPermissionChecker.java | 32 ---
.../hadoop/hdfs/server/namenode/INode.java | 9 +-
.../hdfs/server/namenode/INodeDirectory.java | 9 +-
.../hdfs/server/namenode/INodeReference.java | 3 +-
.../snapshot/DirectorySnapshottableFeature.java | 3 +-
.../snapshot/DirectoryWithSnapshotFeature.java | 3 +-
.../hdfs/server/namenode/snapshot/Snapshot.java | 4 +-
.../TestGetContentSummaryWithPermission.java | 201 -------------------
12 files changed, 16 insertions(+), 285 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index b880590..2f214be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.security.AccessControlException;
/**
* This interface is used by the block manager to expose a
@@ -37,8 +36,7 @@ public interface BlockCollection {
/**
* Get content summary.
*/
- public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
- throws AccessControlException;
+ public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
/**
* @return the number of blocks or block groups
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 43e6f0d..8d5aa0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,14 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.AccessControlException;
-
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
@@ -49,8 +46,6 @@ public class ContentSummaryComputationContext {
public static final String REPLICATED = "Replicated";
public static final Log LOG = LogFactory.getLog(INode.class);
-
- private FSPermissionChecker pc;
/**
* Constructor
*
@@ -62,12 +57,6 @@ public class ContentSummaryComputationContext {
*/
public ContentSummaryComputationContext(FSDirectory dir,
FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
- this(dir, fsn, limitPerRun, sleepMicroSec, null);
- }
-
- public ContentSummaryComputationContext(FSDirectory dir,
- FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
- FSPermissionChecker pc) {
this.dir = dir;
this.fsn = fsn;
this.limitPerRun = limitPerRun;
@@ -76,7 +65,6 @@ public class ContentSummaryComputationContext {
this.snapshotCounts = new ContentCounts.Builder().build();
this.sleepMilliSec = sleepMicroSec/1000;
this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
- this.pc = pc;
}
/** Constructor for blocking computation. */
@@ -198,12 +186,4 @@ public class ContentSummaryComputationContext {
}
return "";
}
-
- void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
- throws AccessControlException {
- if (dir != null && dir.isPermissionEnabled()
- && pc != null && !pc.isSuperUser()) {
- pc.checkPermission(inode, snapshotId, access);
- }
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 0968c65..31b45ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.EnumCounters;
-import org.apache.hadoop.security.AccessControlException;
/**
* Quota feature for {@link INodeDirectory}.
@@ -126,8 +125,7 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
}
ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
- final ContentSummaryComputationContext summary)
- throws AccessControlException {
+ final ContentSummaryComputationContext summary) {
final long original = summary.getCounts().getStoragespace();
long oldYieldCount = summary.getYieldCount();
dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 4c92249..04efa65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -127,8 +127,10 @@ class FSDirStatAndListingOp {
FSDirectory fsd, String src) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
- // getContentSummaryInt() call will check access (if enabled) when
- // traversing all sub directories.
+ if (fsd.isPermissionEnabled()) {
+ fsd.checkPermission(pc, iip, false, null, null, null,
+ FsAction.READ_EXECUTE);
+ }
return getContentSummaryInt(fsd, iip);
}
@@ -511,8 +513,7 @@ class FSDirStatAndListingOp {
// processed. 0 means disabled. I.e. blocking for the entire duration.
ContentSummaryComputationContext cscc =
new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
- fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
- fsd.getPermissionChecker());
+ fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
ContentSummary cs = targetNode.computeAndConvertContentSummary(
iip.getPathSnapshotId(), cscc);
fsd.addYieldCount(cscc.getYieldCount());
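Taken together with the ContentSummaryComputationContext and INodeDirectory hunks, this change moves permission enforcement out of the summary traversal itself: instead of carrying an FSPermissionChecker into the context and checking each directory as the lock-yielding walk reaches it, the caller now performs one eager recursive check up front (by FSDirectory's parameter order, the final READ_EXECUTE argument here is subAccess, so the whole subtree is verified before counting begins). A minimal, self-contained sketch of the two orderings; plain Java, with Dir and the method names as hypothetical stand-ins rather than NameNode APIs:

    import java.util.ArrayList;
    import java.util.List;

    public class SummaryOrderingSketch {

      static class Dir {
        final String name;
        final boolean readExecute;           // caller holds r-x here?
        final List<Dir> children = new ArrayList<>();
        Dir(String name, boolean rx) { this.name = name; this.readExecute = rx; }
      }

      // Restored ordering: verify READ_EXECUTE over the whole subtree first
      // (the eager subAccess check), then count with no further checks.
      static int checkThenCount(Dir root) {
        checkSubtree(root);
        return count(root);
      }

      // Removed ordering: check each directory as the traversal reaches it,
      // interleaved with the counting (and, in HDFS, with lock yielding).
      static int countCheckingAsYouGo(Dir d) {
        if (!d.readExecute) {
          throw new SecurityException("Permission denied: " + d.name);
        }
        int n = 1;
        for (Dir c : d.children) {
          n += countCheckingAsYouGo(c);
        }
        return n;
      }

      private static void checkSubtree(Dir d) {
        if (!d.readExecute) {
          throw new SecurityException("Permission denied: " + d.name);
        }
        for (Dir c : d.children) {
          checkSubtree(c);
        }
      }

      private static int count(Dir d) {
        int n = 1;
        for (Dir c : d.children) {
          n += count(c);
        }
        return n;
      }

      public static void main(String[] args) {
        Dir root = new Dir("/foo", true);
        root.children.add(new Dir("/foo/bar", true));
        System.out.println(checkThenCount(root));        // 2
        System.out.println(countCheckingAsYouGo(root));  // 2
      }
    }

Either way a subdirectory that denies READ_EXECUTE still blocks the summary for a non-superuser; what moves is when the denial happens: before any counting starts rather than partway through a yielding traversal.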
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f745a6c..f1250dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -195,38 +195,6 @@ class FSPermissionChecker implements AccessControlEnforcer {
ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
}
- /**
- * Check permission only for the given inode (not checking the children's
- * access).
- *
- * @param inode the inode to check.
- * @param snapshotId the snapshot id.
- * @param access the target access.
- * @throws AccessControlException
- */
- void checkPermission(INode inode, int snapshotId, FsAction access)
- throws AccessControlException {
- try {
- byte[][] localComponents = {inode.getLocalNameBytes()};
- INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
- AccessControlEnforcer enforcer = getAccessControlEnforcer();
- enforcer.checkPermission(
- fsOwner, supergroup, callerUgi,
- iNodeAttr, // single inode attr in the array
- new INode[]{inode}, // single inode in the array
- localComponents, snapshotId,
- null, -1, // this will skip checkTraverse() because
- // not checking ancestor here
- false, null, null,
- access, // the target access to be checked against the inode
- null, // passing null sub access avoids checking children
- false);
- } catch (AccessControlException ace) {
- throw new AccessControlException(
- toAccessControlString(inode, inode.getFullPathName(), access));
- }
- }
-
@Override
public void checkPermission(String fsOwner, String supergroup,
UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index d768e08..1f982ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.Diff;
-import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ChunkedArrayList;
import org.apache.hadoop.util.StringUtils;
@@ -419,8 +418,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
/** Compute {@link ContentSummary}. Blocking call */
- public final ContentSummary computeContentSummary(
- BlockStoragePolicySuite bsps) throws AccessControlException {
+ public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
new ContentSummaryComputationContext(bsps));
}
@@ -429,7 +427,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* Compute {@link ContentSummary}.
*/
public final ContentSummary computeAndConvertContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) throws AccessControlException {
+ ContentSummaryComputationContext summary) {
computeContentSummary(snapshotId, summary);
final ContentCounts counts = summary.getCounts();
final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -463,8 +461,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* @return The same objects as summary.
*/
public abstract ContentSummaryComputationContext computeContentSummary(
- int snapshotId, ContentSummaryComputationContext summary)
- throws AccessControlException;
+ int snapshotId, ContentSummaryComputationContext summary);
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 3b7fa4e..4012783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,7 +26,6 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
-import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
@@ -44,7 +43,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
@@ -634,7 +632,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
@Override
public ContentSummaryComputationContext computeContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) throws AccessControlException {
+ ContentSummaryComputationContext summary) {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
final ContentCounts counts = new ContentCounts.Builder().build();
@@ -656,10 +654,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
protected ContentSummaryComputationContext computeDirectoryContentSummary(
- ContentSummaryComputationContext summary, int snapshotId)
- throws AccessControlException{
- // throws an exception if the permission check fails
- summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
+ ContentSummaryComputationContext summary, int snapshotId) {
ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
// Explicit traversing is done to enable repositioning after relinquishing
// and reacquiring locks.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index db2026d..1b85237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeat
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
/**
* An anonymous reference to an inode.
@@ -315,7 +314,7 @@ public abstract class INodeReference extends INode {
@Override
public ContentSummaryComputationContext computeContentSummary(int snapshotId,
- ContentSummaryComputationContext summary) throws AccessControlException {
+ ContentSummaryComputationContext summary) {
return referred.computeContentSummary(snapshotId, summary);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 0ab928d..fbfc278 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
@@ -235,7 +234,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
@Override
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
- final ContentCounts counts) throws AccessControlException {
+ final ContentCounts counts) {
counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
super.computeContentSummary4Snapshot(bsps, counts);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7535879..0111b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
@@ -631,7 +630,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
}
public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
- final ContentCounts counts) throws AccessControlException {
+ final ContentCounts counts) {
// Create a new blank summary context for blocking processing of subtree.
ContentSummaryComputationContext summary =
new ContentSummaryComputationContext(bsps);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index 515f164..e98e766 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
-import org.apache.hadoop.security.AccessControlException;
/** Snapshot of a sub-tree in the namesystem. */
@InterfaceAudience.Private
@@ -177,8 +176,7 @@ public class Snapshot implements Comparable<byte[]> {
@Override
public ContentSummaryComputationContext computeContentSummary(
- int snapshotId, ContentSummaryComputationContext summary)
- throws AccessControlException {
+ int snapshotId, ContentSummaryComputationContext summary) {
return computeDirectoryContentSummary(summary, snapshotId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
deleted file mode 100644
index 03aa440..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.security.PrivilegedExceptionAction;
-
-import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests get content summary with permission settings.
- */
-public class TestGetContentSummaryWithPermission {
- protected static final short REPLICATION = 3;
- protected static final long BLOCKSIZE = 1024;
-
- private Configuration conf;
- private MiniDFSCluster cluster;
- private DistributedFileSystem dfs;
-
- @Before
- public void setUp() throws Exception {
- conf = new Configuration();
- conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
- cluster =
- new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
- cluster.waitActive();
-
- dfs = cluster.getFileSystem();
- }
-
- @After
- public void tearDown() throws Exception {
- if (cluster != null) {
- cluster.shutdown();
- cluster = null;
- }
- }
-
- /**
- * Test getContentSummary for the superuser. Whatever permissions the
- * directories carry, the superuser is always allowed access.
- *
- * @throws Exception
- */
- @Test
- public void testGetContentSummarySuperUser() throws Exception {
- final Path foo = new Path("/fooSuper");
- final Path bar = new Path(foo, "barSuper");
- final Path baz = new Path(bar, "bazSuper");
- dfs.mkdirs(bar);
- DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
- ContentSummary summary;
-
- summary = cluster.getNameNodeRpc().getContentSummary(
- foo.toString());
- verifySummary(summary, 2, 1, 10);
-
- dfs.setPermission(foo, new FsPermission((short)0));
-
- summary = cluster.getNameNodeRpc().getContentSummary(
- foo.toString());
- verifySummary(summary, 2, 1, 10);
-
- dfs.setPermission(bar, new FsPermission((short)0));
-
- summary = cluster.getNameNodeRpc().getContentSummary(
- foo.toString());
- verifySummary(summary, 2, 1, 10);
-
- dfs.setPermission(baz, new FsPermission((short)0));
-
- summary = cluster.getNameNodeRpc().getContentSummary(
- foo.toString());
- verifySummary(summary, 2, 1, 10);
- }
-
- /**
- * Test getContentSummary for a non-superuser, non-owner. Such users are
- * restricted by the permissions of subdirectories: if any subdirectory
- * denies READ_EXECUTE access, an AccessControlException is thrown.
- *
- * @throws Exception
- */
- @Test
- public void testGetContentSummaryNonSuperUser() throws Exception {
- final Path foo = new Path("/fooNoneSuper");
- final Path bar = new Path(foo, "barNoneSuper");
- final Path baz = new Path(bar, "bazNoneSuper");
- // run as some random non-superuser, non-owner user.
- final UserGroupInformation userUgi =
- UserGroupInformation.createUserForTesting(
- "randomUser", new String[]{"randomGroup"});
- dfs.mkdirs(bar);
- DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
- // by default, permission is rwxr-xr-x; as long as READ and EXECUTE are set,
- // the content summary should be accessible
- FileStatus fileStatus;
- fileStatus = dfs.getFileStatus(foo);
- assertEquals((short)755, fileStatus.getPermission().toOctal());
- fileStatus = dfs.getFileStatus(bar);
- assertEquals((short)755, fileStatus.getPermission().toOctal());
- // file has no EXECUTE, it is rw-r--r-- default
- fileStatus = dfs.getFileStatus(baz);
- assertEquals((short)644, fileStatus.getPermission().toOctal());
-
- // by default, can get content summary
- ContentSummary summary =
- userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
- () -> cluster.getNameNodeRpc().getContentSummary(
- foo.toString()));
- verifySummary(summary, 2, 1, 10);
-
- // set empty access on root dir, should disallow content summary
- dfs.setPermission(foo, new FsPermission((short)0));
- try {
- userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
- () -> cluster.getNameNodeRpc().getContentSummary(
- foo.toString()));
- fail("Should've failed due to access control exception.");
- } catch (AccessControlException e) {
- assertTrue(e.getMessage().contains("Permission denied"));
- }
-
- // restore foo's permission to allow READ_EXECUTE
- dfs.setPermission(foo,
- new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
- // set empty access on subdir, should disallow content summary from root dir
- dfs.setPermission(bar, new FsPermission((short)0));
-
- try {
- userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
- () -> cluster.getNameNodeRpc().getContentSummary(
- foo.toString()));
- fail("Should've failed due to access control exception.");
- } catch (AccessControlException e) {
- assertTrue(e.getMessage().contains("Permission denied"));
- }
-
- // restore the subdirectory's permission to READ_EXECUTE, enabling
- // getContentSummary on the root again
- dfs.setPermission(bar,
- new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
- summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
- () -> cluster.getNameNodeRpc().getContentSummary(
- foo.toString()));
- verifySummary(summary, 2, 1, 10);
-
- // permission of files under the directory does not affect
- // getContentSummary
- dfs.setPermission(baz, new FsPermission((short)0));
- summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
- () -> cluster.getNameNodeRpc().getContentSummary(
- foo.toString()));
- verifySummary(summary, 2, 1, 10);
- }
-
- private void verifySummary(ContentSummary summary, int dirCount,
- int fileCount, int length) {
- assertEquals(dirCount, summary.getDirectoryCount());
- assertEquals(fileCount, summary.getFileCount());
- assertEquals(length, summary.getLength());
- }
-
-}
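One idiom the deleted test used throughout deserves a note: UserGroupInformation.doAs runs the given action under the supplied user's credentials, and an AccessControlException raised inside it (an IOException subclass) is rethrown by doAs as-is, which is why the test could catch it directly. A trimmed, standalone sketch of the idiom (user and group names invented):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
            "someUser", new String[]{"someGroup"});
        // Everything inside the action executes as "someUser"; checked
        // IOExceptions thrown by the action are rethrown by doAs directly.
        String who = ugi.doAs((PrivilegedExceptionAction<String>)
            () -> UserGroupInformation.getCurrentUser().getShortUserName());
        System.out.println(who);  // someUser
      }
    }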
[02/50] [abbrv] hadoop git commit: HDFS-12037. Ozone: Improvement
rest API output format for better looking. Contributed by Weiwei Yang.
Posted by xy...@apache.org.
HDFS-12037. Ozone: Improvement rest API output format for better looking. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6798e6dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6798e6dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6798e6dd
Branch: refs/heads/HDFS-7240
Commit: 6798e6dd71316008fcec9fb891689e1c66311608
Parents: 3bc9202
Author: Weiwei Yang <ww...@apache.org>
Authored: Sat Jul 8 10:06:58 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jul 12 17:11:46 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/ozone/web/response/BucketInfo.java | 3 ++-
.../main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java | 3 ++-
.../java/org/apache/hadoop/ozone/web/response/ListBuckets.java | 3 ++-
.../main/java/org/apache/hadoop/ozone/web/response/ListKeys.java | 3 ++-
.../java/org/apache/hadoop/ozone/web/response/ListVolumes.java | 3 ++-
.../java/org/apache/hadoop/ozone/web/response/VolumeInfo.java | 3 ++-
6 files changed, 12 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
index 1e47c16..53c7119 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
@@ -56,7 +56,8 @@ public class BucketInfo implements Comparable<BucketInfo> {
mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
- WRITER = mapper.writer(filters);
+ mapper.setFilterProvider(filters);
+ WRITER = mapper.writerWithDefaultPrettyPrinter();
}
private String volumeName;
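The same two-line substitution repeats across all six response classes in this commit: the FilterProvider is registered on the mapper itself, and the shared WRITER is built with Jackson's default pretty printer. A standalone before/after sketch; the Volume POJO, filter id, and field values are invented, and the real classes attach the filter through the MixIn above rather than annotating the class directly:

    import com.fasterxml.jackson.annotation.JsonFilter;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.ObjectWriter;
    import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
    import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;

    public class PrettyJsonSketch {

      @JsonFilter("demo")
      static class Volume {
        public String volumeName = "vol-1";
        public String owner = "hadoop";
      }

      public static void main(String[] args) throws Exception {
        SimpleFilterProvider filters = new SimpleFilterProvider()
            .addFilter("demo", SimpleBeanPropertyFilter.serializeAll());
        ObjectMapper mapper = new ObjectMapper();

        // Old pattern: filters bound when the writer is created; compact output.
        ObjectWriter compact = mapper.writer(filters);
        System.out.println(compact.writeValueAsString(new Volume()));
        // {"volumeName":"vol-1","owner":"hadoop"}

        // New pattern: filters live on the mapper, and the writer carries the
        // default pretty printer; indented, multi-line output.
        mapper.setFilterProvider(filters);
        ObjectWriter pretty = mapper.writerWithDefaultPrettyPrinter();
        System.out.println(pretty.writeValueAsString(new Volume()));
        // {
        //   "volumeName" : "vol-1",
        //   "owner" : "hadoop"
        // }
      }
    }

The compact writer emits one line per object; the pretty writer emits the indented JSON that motivated this commit.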
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
index 69be5b9..e5cfd21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
@@ -54,7 +54,8 @@ public class KeyInfo implements Comparable<KeyInfo> {
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
- WRITER = mapper.writer(filters);
+ mapper.setFilterProvider(filters);
+ WRITER = mapper.writerWithDefaultPrettyPrinter();
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
index 3b0d32e..bc4e65b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
@@ -55,7 +55,8 @@ public class ListBuckets {
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
- WRITER = mapper.writer(filters);
+ mapper.setFilterProvider(filters);
+ WRITER = mapper.writerWithDefaultPrettyPrinter();
}
private List<BucketInfo> buckets;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
index fd76e4a..9dc77d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
@@ -58,7 +58,8 @@ public class ListKeys {
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
- WRITER = mapper.writer(filters);
+ mapper.setFilterProvider(filters);
+ WRITER = mapper.writerWithDefaultPrettyPrinter();
}
private String name;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
index 797c0ef..b918349 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
@@ -59,7 +59,8 @@ public class ListVolumes {
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
- WRITER = mapper.writer(filters);
+ mapper.setFilterProvider(filters);
+ WRITER = mapper.writerWithDefaultPrettyPrinter();
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
index 8e6ec902..112b27e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
@@ -58,7 +58,8 @@ public class VolumeInfo implements Comparable<VolumeInfo> {
JsonAutoDetect.Visibility.ANY);
mapper.addMixIn(Object.class, MixIn.class);
- WRITER = mapper.writer(filters);
+ mapper.setFilterProvider(filters);
+ WRITER = mapper.writerWithDefaultPrettyPrinter();
}
/**