Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/19 01:35:39 UTC
svn commit: r1327724 [3/3] - in
/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/
hadoop-hdfs...
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java Wed Apr 18 23:35:30 2012
@@ -18,48 +18,105 @@
package org.apache.hadoop.hdfs.server.datanode;
-import java.net.InetSocketAddress;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.VersionInfo;
+import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
-
public class TestDatanodeRegister {
public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class);
// Invalid address
- static final InetSocketAddress INVALID_ADDR =
+ private static final InetSocketAddress INVALID_ADDR =
new InetSocketAddress("127.0.0.1", 1);
-
- @Test
- public void testDataNodeRegister() throws Exception {
+
+ private BPServiceActor actor;
+ NamespaceInfo fakeNsInfo;
+ DNConf mockDnConf;
+
+ @Before
+ public void setUp() throws IOException {
+ mockDnConf = mock(DNConf.class);
+ doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
+
DataNode mockDN = mock(DataNode.class);
- Mockito.doReturn(true).when(mockDN).shouldRun();
+ doReturn(true).when(mockDN).shouldRun();
+ doReturn(mockDnConf).when(mockDN).getDnConf();
- BPOfferService mockBPOS = Mockito.mock(BPOfferService.class);
- Mockito.doReturn(mockDN).when(mockBPOS).getDataNode();
+ BPOfferService mockBPOS = mock(BPOfferService.class);
+ doReturn(mockDN).when(mockBPOS).getDataNode();
- BPServiceActor actor = new BPServiceActor(INVALID_ADDR, mockBPOS);
+ actor = new BPServiceActor(INVALID_ADDR, mockBPOS);
- NamespaceInfo fakeNSInfo = mock(NamespaceInfo.class);
- when(fakeNSInfo.getBuildVersion()).thenReturn("NSBuildVersion");
- DatanodeProtocolClientSideTranslatorPB fakeDNProt =
+ fakeNsInfo = mock(NamespaceInfo.class);
+ // Return a good software version.
+ doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
+ // Return a good layout version for now.
+ doReturn(HdfsConstants.LAYOUT_VERSION).when(fakeNsInfo).getLayoutVersion();
+
+ DatanodeProtocolClientSideTranslatorPB fakeDnProt =
mock(DatanodeProtocolClientSideTranslatorPB.class);
- when(fakeDNProt.versionRequest()).thenReturn(fakeNSInfo);
+ when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
+ actor.setNameNode(fakeDnProt);
+ }
- actor.setNameNode( fakeDNProt );
- try {
+ @Test
+ public void testSoftwareVersionDifferences() throws Exception {
+ // We expect no exception to be thrown when the software versions match.
+ assertEquals(VersionInfo.getVersion(),
+ actor.retrieveNamespaceInfo().getSoftwareVersion());
+
+ // We expect no exception to be thrown when the min NN version is below the
+ // reported NN version.
+ doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
+ doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
+ assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
+
+ // When the NN reports a version that's too low, throw an exception.
+ doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
+ doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
+ try {
+ actor.retrieveNamespaceInfo();
+ fail("Should have thrown an exception for NN with too-low version");
+ } catch (IncorrectVersionException ive) {
+ GenericTestUtils.assertExceptionContains(
+ "The reported NameNode version is too low", ive);
+ LOG.info("Got expected exception", ive);
+ }
+ }
+
+ @Test
+ public void testDifferentLayoutVersions() throws Exception {
+ // We expect no exceptions to be thrown when the layout versions match.
+ assertEquals(HdfsConstants.LAYOUT_VERSION,
+ actor.retrieveNamespaceInfo().getLayoutVersion());
+
+ // We expect an exception to be thrown when the NN reports a layout version
+ // different from that of the DN.
+ doReturn(HdfsConstants.LAYOUT_VERSION * 1000).when(fakeNsInfo)
+ .getLayoutVersion();
+ try {
actor.retrieveNamespaceInfo();
- fail("register() did not throw exception! " +
- "Expected: IncorrectVersionException");
- } catch (IncorrectVersionException ie) {
- LOG.info("register() returned correct Exception: IncorrectVersionException");
+ fail("Should have failed to retrieve NS info from DN with bad layout version");
+ } catch (IncorrectVersionException ive) {
+ GenericTestUtils.assertExceptionContains(
+ "Unexpected version of namenode", ive);
+ LOG.info("Got expected exception", ive);
}
}
}
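
The refactor above drops the Mockito.-qualified calls in favor of statically imported doReturn()/when() and moves the mock setup into a shared setUp() method. As a quick reference, here is a minimal, self-contained sketch of the two stubbing styles (plain Mockito and JUnit 4, no Hadoop dependencies; the Greeter class is hypothetical):

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.junit.Test;

    public class StubbingStylesSketch {
      // Hypothetical collaborator to stub.
      static class Greeter {
        String greet() { return "real"; }
      }

      @Test
      public void testTwoStubbingStyles() {
        Greeter g = mock(Greeter.class);

        // Style 1: when(...).thenReturn(...) invokes the mocked method once
        // while recording the stub.
        when(g.greet()).thenReturn("hello");
        assertEquals("hello", g.greet());

        // Style 2: doReturn(...).when(...) records the stub without calling
        // the method first; the patch uses this form for the DN/NN mocks.
        doReturn("hi").when(g).greet();
        assertEquals("hi", g.greet());
      }
    }
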
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java Wed Apr 18 23:35:30 2012
@@ -43,7 +43,7 @@ public class TestJournalService {
private Configuration conf = new HdfsConfiguration();
/**
- * Test calls backs {@link JournalListener#rollLogs(JournalService, long)} and
+ * Tests that the callbacks {@link JournalListener#startLogSegment(JournalService, long)} and
* {@link JournalListener#journal(JournalService, long, int, byte[])} are
* called.
*/
@@ -85,7 +85,7 @@ public class TestJournalService {
*/
private void verifyRollLogsCallback(JournalService s, JournalListener l)
throws IOException {
- Mockito.verify(l, Mockito.times(1)).rollLogs(Mockito.eq(s), Mockito.anyLong());
+ Mockito.verify(l, Mockito.times(1)).startLogSegment(Mockito.eq(s), Mockito.anyLong());
}
/**
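
The verification change above swaps rollLogs for startLogSegment but keeps the same matcher-based verify. A minimal sketch of the verify(mock, times(n)) pattern with eq() and anyLong() (the Listener interface is hypothetical):

    import static org.mockito.Mockito.anyLong;
    import static org.mockito.Mockito.eq;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    public class VerifySketch {
      // Hypothetical listener in the spirit of JournalListener.
      interface Listener {
        void startLogSegment(String source, long txId);
      }

      public static void main(String[] args) {
        Listener l = mock(Listener.class);
        l.startLogSegment("journal-1", 42L);

        // Passes: called exactly once with that source and any txId.
        verify(l, times(1)).startLogSegment(eq("journal-1"), anyLong());
      }
    }
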
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Apr 18 23:35:30 2012
@@ -58,6 +58,7 @@ import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@@ -783,6 +784,7 @@ public class NNThroughputBenchmark {
String hostName = DNS.getDefaultHost("default", "default");
dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
dnRegistration.setHostName(hostName);
+ dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
this.blocks = new ArrayList<Block>(blockCapacity);
this.nrBlocks = 0;
}
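
The benchmark's simulated datanodes now report a software version because registration is checked against a configured minimum (see the TestDatanodeRegister changes above). A tiny sketch of the kind of dotted-version comparison such a check needs; compareVersions here is a local stand-in, not Hadoop's utility:

    public class VersionCompareSketch {
      // Compare dotted version strings numerically, component by component.
      static int compareVersions(String a, String b) {
        String[] as = a.split("\\.");
        String[] bs = b.split("\\.");
        int n = Math.max(as.length, bs.length);
        for (int i = 0; i < n; i++) {
          int ai = i < as.length ? Integer.parseInt(as[i]) : 0;
          int bi = i < bs.length ? Integer.parseInt(bs[i]) : 0;
          if (ai != bi) {
            return ai - bi;
          }
        }
        return 0;
      }

      public static void main(String[] args) {
        // A NN reporting "3.0.0" is too low for a DN requiring "4.0.0".
        System.out.println(compareVersions("3.0.0", "4.0.0") < 0);  // true
        System.out.println(compareVersions("4.0.0", "4.0.0") == 0); // true
      }
    }
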
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Apr 18 23:35:30 2012
@@ -141,6 +141,20 @@ public class TestEditLog extends TestCas
}
}
}
+
+ /**
+ * Construct FSEditLog with default configuration, taking editDirs from NNStorage
+ *
+ * @param storage Storage object used by namenode
+ */
+ private static FSEditLog getFSEditLog(NNStorage storage) throws IOException {
+ Configuration conf = new Configuration();
+ // Make sure the edits dirs are set in the provided configuration object.
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+ StringUtils.join(",", storage.getEditsDirectories()));
+ FSEditLog log = new FSEditLog(conf, storage, FSNamesystem.getNamespaceEditsDirs(conf));
+ return log;
+ }
/**
* Test case for an empty edit log from a prior version of Hadoop.
@@ -863,7 +877,7 @@ public class TestEditLog extends TestCas
storage = mockStorageWithEdits(
"[1,100]|[101,200]|[201,]",
"[1,100]|[101,200]|[201,]");
- log = new FSEditLog(storage);
+ log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",
log.getEditLogManifest(1).toString());
@@ -875,7 +889,7 @@ public class TestEditLog extends TestCas
storage = mockStorageWithEdits(
"[1,100]|[101,200]",
"[1,100]|[201,300]|[301,400]"); // nothing starting at 101
- log = new FSEditLog(storage);
+ log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200], [201,300], [301,400]]",
log.getEditLogManifest(1).toString());
@@ -885,7 +899,7 @@ public class TestEditLog extends TestCas
storage = mockStorageWithEdits(
"[1,100]|[301,400]", // gap from 101 to 300
"[301,400]|[401,500]");
- log = new FSEditLog(storage);
+ log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[301,400], [401,500]]",
log.getEditLogManifest(1).toString());
@@ -895,7 +909,7 @@ public class TestEditLog extends TestCas
storage = mockStorageWithEdits(
"[1,100]|[101,150]", // short log at 101
"[1,50]|[101,200]"); // short log at 1
- log = new FSEditLog(storage);
+ log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",
log.getEditLogManifest(1).toString());
@@ -908,7 +922,7 @@ public class TestEditLog extends TestCas
storage = mockStorageWithEdits(
"[1,100]|[101,]",
"[1,100]|[101,200]");
- log = new FSEditLog(storage);
+ log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",
log.getEditLogManifest(1).toString());
@@ -998,7 +1012,7 @@ public class TestEditLog extends TestCas
Collections.<URI>emptyList(),
editUris);
storage.format(new NamespaceInfo());
- FSEditLog editlog = new FSEditLog(storage);
+ FSEditLog editlog = getFSEditLog(storage);
// open the edit log and add two transactions
// logGenerationStamp is used, simply because it doesn't
// require complex arguments.
@@ -1080,7 +1094,7 @@ public class TestEditLog extends TestCas
new AbortSpec(9, 0),
new AbortSpec(10, 1));
long totaltxnread = 0;
- FSEditLog editlog = new FSEditLog(storage);
+ FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
Iterable<EditLogInputStream> editStreams = editlog.selectInputStreams(startTxId,
@@ -1130,7 +1144,7 @@ public class TestEditLog extends TestCas
assertEquals(1, files.length);
assertTrue(files[0].delete());
- FSEditLog editlog = new FSEditLog(storage);
+ FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
try {
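
The getFSEditLog() helper above works by writing the storage object's edit directories back into the configuration as the comma-separated value that dfs.namenode.edits.dir expects. A dependency-free sketch of that serialization step (the URIs are made up):

    import java.net.URI;
    import java.util.Arrays;
    import java.util.List;

    public class EditsDirConfSketch {
      public static void main(String[] args) {
        // Stand-ins for what NNStorage.getEditsDirectories() would return.
        List<URI> editsDirs = Arrays.asList(
            URI.create("file:///data/1/dfs/name"),
            URI.create("file:///data/2/dfs/name"));

        // Equivalent of StringUtils.join(",", editsDirs): one comma-separated
        // value, suitable for conf.set(DFS_NAMENODE_EDITS_DIR_KEY, ...).
        StringBuilder sb = new StringBuilder();
        for (URI dir : editsDirs) {
          if (sb.length() > 0) {
            sb.append(",");
          }
          sb.append(dir);
        }
        System.out.println("dfs.namenode.edits.dir = " + sb);
      }
    }
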
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Wed Apr 18 23:35:30 2012
@@ -490,4 +490,46 @@ public class TestNameEditsConfigs {
cluster.shutdown();
}
}
+
+ /**
+ * Tests that dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
+ * tolerate whitespace around their values.
+ */
+ @Test
+ public void testCheckPointDirsAreTrimmed() throws Exception {
+ MiniDFSCluster cluster = null;
+ SecondaryNameNode secondary = null;
+ File checkpointNameDir1 = new File(base_dir, "chkptName1");
+ File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
+ File checkpointNameDir2 = new File(base_dir, "chkptName2");
+ File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
+ File nameDir = new File(base_dir, "name1");
+ String whiteSpace = " \n \n ";
+ Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
+ conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
+ + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
+ + checkpointNameDir2.getPath() + whiteSpace);
+ conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+ whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
+ + checkpointEditsDir2.getPath() + whiteSpace);
+ cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
+ .numDataNodes(3).build();
+ try {
+ cluster.waitActive();
+ secondary = startSecondaryNameNode(conf);
+ secondary.doCheckpoint();
+ assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY + " must be trimmed ",
+ checkpointNameDir1.exists());
+ assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY + " must be trimmed ",
+ checkpointNameDir2.exists());
+ assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+ + " must be trimmed ", checkpointEditsDir1.exists());
+ assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+ + " must be trimmed ", checkpointEditsDir2.exists());
+ } finally {
+ if (secondary != null) {
+ secondary.shutdown();
+ }
+ cluster.shutdown();
+ }
+ }
}
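
The new test asserts that checkpoint directory values survive the injected whitespace. The behavior under test reduces to Configuration's trimmed accessors; a plain-Java sketch of the same split-and-trim logic (the paths are made up):

    public class TrimmedValuesSketch {
      public static void main(String[] args) {
        // A value carrying the kind of whitespace the test wraps around paths.
        String raw = " \n  /data/chkptName1 \n , \n  /data/chkptName2 \n ";

        // Equivalent of Configuration.getTrimmedStrings(key): split on commas,
        // then trim each element before handing it to the checkpointer.
        for (String part : raw.split(",")) {
          System.out.println("[" + part.trim() + "]");
        }
      }
    }
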
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Wed Apr 18 23:35:30 2012
@@ -101,6 +101,8 @@ public class TestNameNodeMXBean {
assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
assertTrue(liveNode.containsKey("capacity"));
assertTrue(((Long)liveNode.get("capacity")) > 0);
+ assertTrue(liveNode.containsKey("numBlocks"));
+ assertTrue(((Long)liveNode.get("numBlocks")) == 0);
}
Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
// get attribute deadnodeinfo
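
The numBlocks assertions read per-datanode fields out of the NameNode's MXBean attributes. A self-contained sketch of registering and reading an MXBean attribute through the platform MBean server (the Demo bean is hypothetical; the real test queries the NameNodeInfo bean):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class MXBeanReadSketch {
      // Hypothetical MXBean exposing a numBlocks-style attribute.
      public interface DemoInfoMXBean {
        long getNumBlocks();
      }

      public static class DemoInfo implements DemoInfoMXBean {
        @Override
        public long getNumBlocks() {
          return 0L;
        }
      }

      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("Demo:name=DemoInfo");
        mbs.registerMBean(new DemoInfo(), name);

        // The attribute name "NumBlocks" is derived from getNumBlocks().
        Long numBlocks = (Long) mbs.getAttribute(name, "NumBlocks");
        System.out.println("numBlocks = " + numBlocks);
      }
    }
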
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Wed Apr 18 23:35:30 2012
@@ -26,10 +26,12 @@ import java.net.InetSocketAddress;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
+import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -42,11 +44,14 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -98,6 +103,11 @@ public class TestDelegationTokensWithHA
}
+ @Before
+ public void prepTest() {
+ SecurityUtilTestHelper.setTokenServiceUseIp(true);
+ }
+
@Test
public void testDelegationTokenDFSApi() throws Exception {
Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
@@ -185,24 +195,48 @@ public class TestDelegationTokensWithHA
URI haUri = new URI("hdfs://my-ha-uri/");
token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
ugi.addToken(token);
- HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn0.getNameNodeAddress());
- HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nn1.getNameNodeAddress());
+
+ Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
+ nnAddrs.add(nn0.getNameNodeAddress());
+ nnAddrs.add(nn1.getNameNodeAddress());
+ HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
assertEquals(3, tokens.size());
LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens));
+ DelegationTokenSelector dts = new DelegationTokenSelector();
// check that the token selected for one of the physical IPC addresses
// matches the one we received
- InetSocketAddress addr = nn0.getNameNodeAddress();
- Text ipcDtService = new Text(
- addr.getAddress().getHostAddress() + ":" + addr.getPort());
- Token<DelegationTokenIdentifier> token2 =
- DelegationTokenSelector.selectHdfsDelegationToken(ipcDtService, ugi);
- assertNotNull(token2);
- assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
- assertArrayEquals(token.getPassword(), token2.getPassword());
+ for (InetSocketAddress addr : nnAddrs) {
+ Text ipcDtService = SecurityUtil.buildTokenService(addr);
+ Token<DelegationTokenIdentifier> token2 =
+ dts.selectToken(ipcDtService, ugi.getTokens());
+ assertNotNull(token2);
+ assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
+ assertArrayEquals(token.getPassword(), token2.getPassword());
+ }
+
+ // switch to host-based tokens, shouldn't match existing tokens
+ SecurityUtilTestHelper.setTokenServiceUseIp(false);
+ for (InetSocketAddress addr : nnAddrs) {
+ Text ipcDtService = SecurityUtil.buildTokenService(addr);
+ Token<DelegationTokenIdentifier> token2 =
+ dts.selectToken(ipcDtService, ugi.getTokens());
+ assertNull(token2);
+ }
+
+ // reclone the tokens, and see if they match now
+ HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
+ for (InetSocketAddress addr : nnAddrs) {
+ Text ipcDtService = SecurityUtil.buildTokenService(addr);
+ Token<DelegationTokenIdentifier> token2 =
+ dts.selectToken(ipcDtService, ugi.getTokens());
+ assertNotNull(token2);
+ assertArrayEquals(token.getIdentifier(), token2.getIdentifier());
+ assertArrayEquals(token.getPassword(), token2.getPassword());
+ }
}
/**
@@ -212,8 +246,30 @@ public class TestDelegationTokensWithHA
*/
@Test
public void testDFSGetCanonicalServiceName() throws Exception {
- assertEquals(fs.getCanonicalServiceName(),
- HATestUtil.getLogicalUri(cluster).getHost());
+ URI haUri = HATestUtil.getLogicalUri(cluster);
+ String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+ assertEquals(haService, dfs.getCanonicalServiceName());
+ Token<?> token = dfs.getDelegationToken(
+ UserGroupInformation.getCurrentUser().getShortUserName());
+ assertEquals(haService, token.getService().toString());
+ // make sure the logical uri is handled correctly
+ token.renew(dfs.getConf());
+ token.cancel(dfs.getConf());
+ }
+
+ @Test
+ public void testHdfsGetCanonicalServiceName() throws Exception {
+ Configuration conf = dfs.getConf();
+ URI haUri = HATestUtil.getLogicalUri(cluster);
+ AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
+ String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+ assertEquals(haService, afs.getCanonicalServiceName());
+ Token<?> token = afs.getDelegationTokens(
+ UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
+ assertEquals(haService, token.getService().toString());
+ // make sure the logical uri is handled correctly
+ token.renew(conf);
+ token.cancel(conf);
}
enum TokenTestAction {
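
The reworked test exercises token selection by service string: IP-based service names stop matching once SecurityUtilTestHelper.setTokenServiceUseIp(false) switches to host-based names, until the tokens are re-cloned. A stripped-down, Hadoop-free sketch of that match-by-service behavior (this Token class is a stand-in, not Hadoop's):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    public class TokenSelectSketch {
      // Stand-in for Hadoop's Token: just a kind and a service string.
      static final class Token {
        final String kind;
        final String service;
        Token(String kind, String service) {
          this.kind = kind;
          this.service = service;
        }
      }

      // Mimics DelegationTokenSelector.selectToken(service, tokens): return
      // the first token whose service matches, or null if none does.
      static Token selectToken(String service, Collection<Token> tokens) {
        for (Token t : tokens) {
          if (t.service.equals(service)) {
            return t;
          }
        }
        return null;
      }

      public static void main(String[] args) {
        List<Token> tokens = Arrays.asList(
            new Token("HDFS_DELEGATION_TOKEN", "127.0.0.1:8020"),
            new Token("HDFS_DELEGATION_TOKEN", "127.0.0.1:8021"));

        // IP-based service names match the cloned tokens...
        System.out.println(selectToken("127.0.0.1:8020", tokens) != null); // true
        // ...but host-based names do not, which is why the test re-clones
        // the tokens after flipping setTokenServiceUseIp(false).
        System.out.println(selectToken("localhost:8020", tokens) != null); // false
      }
    }
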
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java Wed Apr 18 23:35:30 2012
@@ -19,17 +19,24 @@ package org.apache.hadoop.hdfs.server.na
import java.io.File;
import java.io.IOException;
+import java.net.URISyntaxException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
@@ -48,7 +55,10 @@ public class TestInitializeSharedEdits {
@Before
public void setupCluster() throws IOException {
conf = new Configuration();
-
+ conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+ HAUtil.setAllowStandbyReads(conf, true);
+
MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
cluster = new MiniDFSCluster.Builder(conf)
@@ -56,11 +66,8 @@ public class TestInitializeSharedEdits {
.numDataNodes(0)
.build();
cluster.waitActive();
-
- cluster.shutdownNameNode(0);
- cluster.shutdownNameNode(1);
- File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1));
- assertTrue(FileUtil.fullyDelete(sharedEditsDir));
+
+ shutdownClusterAndRemoveSharedEditsDir();
}
@After
@@ -70,8 +77,14 @@ public class TestInitializeSharedEdits {
}
}
- @Test
- public void testInitializeSharedEdits() throws Exception {
+ private void shutdownClusterAndRemoveSharedEditsDir() throws IOException {
+ cluster.shutdownNameNode(0);
+ cluster.shutdownNameNode(1);
+ File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1));
+ assertTrue(FileUtil.fullyDelete(sharedEditsDir));
+ }
+
+ private void assertCannotStartNameNodes() {
// Make sure we can't currently start either NN.
try {
cluster.restartNameNode(0, false);
@@ -89,24 +102,28 @@ public class TestInitializeSharedEdits {
GenericTestUtils.assertExceptionContains(
"Cannot start an HA namenode with name dirs that need recovery", ioe);
}
-
- // Initialize the shared edits dir.
- assertFalse(NameNode.initializeSharedEdits(conf));
-
+ }
+
+ private void assertCanStartHaNameNodes(String pathSuffix)
+ throws ServiceFailedException, IOException, URISyntaxException,
+ InterruptedException {
// Now should be able to start both NNs. Pass "false" here so that we don't
// try to waitActive on all NNs, since the second NN doesn't exist yet.
cluster.restartNameNode(0, false);
cluster.restartNameNode(1, true);
// Make sure HA is working.
- cluster.transitionToActive(0);
+ cluster.getNameNode(0).getRpcServer().transitionToActive(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
FileSystem fs = null;
try {
+ Path newPath = new Path(TEST_PATH, pathSuffix);
fs = HATestUtil.configureFailoverFs(cluster, conf);
- assertTrue(fs.mkdirs(TEST_PATH));
- cluster.transitionToStandby(0);
- cluster.transitionToActive(1);
- assertTrue(fs.isDirectory(TEST_PATH));
+ assertTrue(fs.mkdirs(newPath));
+ HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+ cluster.getNameNode(1));
+ assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
+ newPath.toString(), false).isDir());
} finally {
if (fs != null) {
fs.close();
@@ -115,6 +132,29 @@ public class TestInitializeSharedEdits {
}
@Test
+ public void testInitializeSharedEdits() throws Exception {
+ assertCannotStartNameNodes();
+
+ // Initialize the shared edits dir.
+ assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
+
+ assertCanStartHaNameNodes("1");
+
+ // Now that we've done a metadata operation, make sure that deleting and
+ // re-initializing the shared edits dir will let the standby still start.
+
+ shutdownClusterAndRemoveSharedEditsDir();
+
+ assertCannotStartNameNodes();
+
+ // Re-initialize the shared edits dir.
+ assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
+
+ // Should *still* be able to start both NNs
+ assertCanStartHaNameNodes("2");
+ }
+
+ @Test
public void testDontOverWriteExistingDir() {
assertFalse(NameNode.initializeSharedEdits(conf, false));
assertTrue(NameNode.initializeSharedEdits(conf, false));
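
assertCannotStartNameNodes() above leans on GenericTestUtils.assertExceptionContains to pin down the failure mode. A minimal stand-alone equivalent of that helper, with a usage example mirroring the expected message:

    import java.io.IOException;

    public class AssertExceptionSketch {
      // Stand-alone equivalent of GenericTestUtils.assertExceptionContains().
      static void assertExceptionContains(String fragment, Throwable t) {
        String msg = t.getMessage();
        if (msg == null || !msg.contains(fragment)) {
          throw new AssertionError("Expected a message containing \"" + fragment
              + "\" but got: " + msg);
        }
      }

      public static void main(String[] args) {
        try {
          throw new IOException(
              "Cannot start an HA namenode with name dirs that need recovery");
        } catch (IOException ioe) {
          assertExceptionContains("name dirs that need recovery", ioe);
          System.out.println("Got expected exception: " + ioe.getMessage());
        }
      }
    }
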
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java?rev=1327724&r1=1327723&r2=1327724&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java Wed Apr 18 23:35:30 2012
@@ -34,10 +34,16 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.Assert;
import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;
public class TestWebHdfsUrl {
@@ -90,4 +96,60 @@ public class TestWebHdfsUrl {
private String generateUrlQueryPrefix(HttpOpParam.Op op, String username) {
return "op=" + op.toString() + "&user.name=" + username;
}
+
+ @Test
+ public void testSelectDelegationToken() throws Exception {
+ SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+ Configuration conf = new Configuration();
+ URI webHdfsUri = URI.create("webhdfs://localhost:0");
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ Token<?> token = null;
+
+ // test fallback to hdfs token
+ Token<?> hdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+ new Text("127.0.0.1:8020"));
+ ugi.addToken(hdfsToken);
+
+ WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(webHdfsUri, conf);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hdfsToken, token);
+
+ // test webhdfs is favored over hdfs
+ Token<?> webHdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:0"));
+ ugi.addToken(webHdfsToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(webHdfsToken, token);
+
+ // switch to using host-based tokens, no token should match
+ SecurityUtilTestHelper.setTokenServiceUseIp(false);
+ token = fs.selectDelegationToken();
+ assertNull(token);
+
+ // test fallback to hdfs token
+ hdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+ new Text("localhost:8020"));
+ ugi.addToken(hdfsToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hdfsToken, token);
+
+ // test webhdfs is favored over hdfs
+ webHdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:0"));
+ ugi.addToken(webHdfsToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(webHdfsToken, token);
+ }
+
}