Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2014/07/25 22:33:22 UTC
svn commit: r1613514 [5/6] - in
/hadoop/common/branches/YARN-1051/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apac...
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java Fri Jul 25 20:33:09 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.test.PathUtils;
@@ -101,6 +102,7 @@ public class TestReplicationPolicyConsid
}
}
+ private final double EPSILON = 0.0001;
/**
* Tests that chooseTarget with considerLoad set to true correctly calculates
* load with decommissioned nodes.
@@ -109,14 +111,6 @@ public class TestReplicationPolicyConsid
public void testChooseTargetWithDecomNodes() throws IOException {
namenode.getNamesystem().writeLock();
try {
- // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
- // returns false
- for (int i = 0; i < 3; i++) {
- DatanodeInfo d = dnManager.getDatanodeByXferAddr(
- dnrList.get(i).getIpAddr(),
- dnrList.get(i).getXferPort());
- d.setDecommissioned();
- }
String blockPoolId = namenode.getNamesystem().getBlockPoolId();
dnManager.handleHeartbeat(dnrList.get(3),
BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -133,6 +127,20 @@ public class TestReplicationPolicyConsid
blockPoolId, dataNodes[5].getCacheCapacity(),
dataNodes[5].getCacheRemaining(),
4, 0, 0);
+ // total xceiver count (load) reported in the above heartbeats
+ final int load = 2 + 4 + 4;
+
+ FSNamesystem fsn = namenode.getNamesystem();
+ assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);
+
+ // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
+ // returns false
+ for (int i = 0; i < 3; i++) {
+ DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
+ dnManager.startDecommission(d);
+ d.setDecommissioned();
+ }
+ assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);
// Call chooseTarget()
DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
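The arithmetic behind the two new assertions deserves a note. The three heartbeats above report xceiver counts of 2, 4, and 4, all from nodes that stay in service, so the numerator is fixed at 10; decommissioning the three idle nodes only shrinks the denominator, moving the average from 10/6 to 10/3. A minimal, self-contained sketch of that calculation (plain Java, no Hadoop dependencies, class name hypothetical):

public class XceiverAverageSketch {
  public static void main(String[] args) {
    final int load = 2 + 4 + 4;  // xceiver counts from the three heartbeats
    int inServiceNodes = 6;      // all six datanodes initially in service
    System.out.println("average before decommission: "
        + (double) load / inServiceNodes);  // 10/6 = 1.6666...
    inServiceNodes -= 3;         // the three decommissioned nodes reported no load
    System.out.println("average after decommission:  "
        + (double) load / inServiceNodes);  // 10/3 = 3.3333...
  }
}
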
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Fri Jul 25 20:33:09 2014
@@ -591,51 +591,50 @@ public class TestReplicationPolicyWithNo
*/
@Test
public void testChooseReplicaToDelete() throws Exception {
- List<DatanodeDescriptor> replicaNodeList =
- new ArrayList<DatanodeDescriptor>();
- final Map<String, List<DatanodeDescriptor>> rackMap =
- new HashMap<String, List<DatanodeDescriptor>>();
+ List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
+ final Map<String, List<DatanodeStorageInfo>> rackMap
+ = new HashMap<String, List<DatanodeStorageInfo>>();
dataNodes[0].setRemaining(4*1024*1024);
- replicaNodeList.add(dataNodes[0]);
+ replicaList.add(storages[0]);
dataNodes[1].setRemaining(3*1024*1024);
- replicaNodeList.add(dataNodes[1]);
+ replicaList.add(storages[1]);
dataNodes[2].setRemaining(2*1024*1024);
- replicaNodeList.add(dataNodes[2]);
+ replicaList.add(storages[2]);
dataNodes[5].setRemaining(1*1024*1024);
- replicaNodeList.add(dataNodes[5]);
+ replicaList.add(storages[5]);
- List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
- List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
+ List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
+ List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
replicator.splitNodesWithRack(
- replicaNodeList, rackMap, first, second);
+ replicaList, rackMap, first, second);
assertEquals(3, first.size());
assertEquals(1, second.size());
- DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+ DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
null, null, (short)3, first, second);
// Within first set {dataNodes[0], dataNodes[1], dataNodes[2]},
// dataNodes[0] and dataNodes[1] are in the same nodegroup,
// but dataNodes[1] is chosen because it has less free space
- assertEquals(chosenNode, dataNodes[1]);
+ assertEquals(chosen, storages[1]);
- replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+ replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(2, first.size());
assertEquals(1, second.size());
// Within first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen
// because it has less free space
- chosenNode = replicator.chooseReplicaToDelete(
+ chosen = replicator.chooseReplicaToDelete(
null, null, (short)2, first, second);
- assertEquals(chosenNode, dataNodes[2]);
+ assertEquals(chosen, storages[2]);
- replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
+ replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(2, second.size());
// Within the second set, dataNodes[5] is chosen because it has the least free space
- chosenNode = replicator.chooseReplicaToDelete(
+ chosen = replicator.chooseReplicaToDelete(
null, null, (short)1, first, second);
- assertEquals(chosenNode, dataNodes[5]);
+ assertEquals(chosen, storages[5]);
}
/**
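The selection rule exercised above has two stages: splitNodesWithRack first groups the replicas by rack, and chooseReplicaToDelete then prefers a node from an over-represented rack or nodegroup, breaking ties by least remaining space, which is why storages[1], sharing a nodegroup with storages[0] but holding less free space, goes first. A toy restatement of the tie-break alone, with a hypothetical Replica class rather than Hadoop's DatanodeStorageInfo:

import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class LeastFreeSpaceSketch {
  // Hypothetical stand-in for a storage and its remaining capacity.
  static class Replica {
    final String name;
    final long remainingBytes;
    Replica(String name, long remainingBytes) {
      this.name = name;
      this.remainingBytes = remainingBytes;
    }
  }

  public static void main(String[] args) {
    // storages[0] and storages[1] share a nodegroup, so one of them must go;
    // the policy deletes the one with the least free space.
    List<Replica> sameNodegroup = Arrays.asList(
        new Replica("storages[0]", 4L * 1024 * 1024),
        new Replica("storages[1]", 3L * 1024 * 1024));
    Replica chosen = Collections.min(sameNodegroup, new Comparator<Replica>() {
      @Override
      public int compare(Replica a, Replica b) {
        return Long.compare(a.remainingBytes, b.remainingBytes);
      }
    });
    System.out.println("delete " + chosen.name);  // storages[1]
  }
}
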
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Fri Jul 25 20:33:09 2014
@@ -285,8 +285,10 @@ public class TestJspHelper {
String user = "TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- conf.set(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(realUser), "*");
- conf.set(DefaultImpersonationProvider.getProxySuperuserIpConfKey(realUser), "*");
+ conf.set(DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserGroupConfKey(realUser), "*");
+ conf.set(DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserIpConfKey(realUser), "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
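For context, the helper methods above generate Hadoop's standard proxy-user configuration keys, so the test's setup is equivalent to setting those keys directly. A sketch, assuming the default hadoop.proxyuser prefix and a placeholder user name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class ProxyUserConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String realUser = "someRealUser";  // placeholder principal name
    // "*" lets realUser impersonate members of any group, from any host.
    conf.set("hadoop.proxyuser." + realUser + ".groups", "*");
    conf.set("hadoop.proxyuser." + realUser + ".hosts", "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
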
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java Fri Jul 25 20:33:09 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -324,7 +325,7 @@ public abstract class BlockReportTestBas
public void blockReport_03() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
- ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
+ writeFile(METHOD_NAME, FILE_SIZE, filePath);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N0);
@@ -363,7 +364,7 @@ public abstract class BlockReportTestBas
// Create a bogus new block which will not be present on the namenode.
ExtendedBlock b = new ExtendedBlock(
poolId, rand.nextLong(), 1024L, rand.nextLong());
- dn.getFSDataset().createRbw(b);
+ dn.getFSDataset().createRbw(StorageType.DEFAULT, b);
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Fri Jul 25 20:33:09 2014
@@ -116,7 +116,8 @@ public class DataNodeTestUtils {
public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
- bpScanner.verifyBlock(b);
+ bpScanner.verifyBlock(new ExtendedBlock(b.getBlockPoolId(),
+ new BlockPoolSliceScanner.BlockScanInfo(b.getLocalBlock())));
}
private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Jul 25 20:33:09 2014
@@ -744,14 +744,14 @@ public class SimulatedFSDataset implemen
}
@Override // FsDatasetSpi
- public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b)
- throws IOException {
- return createTemporary(b);
+ public synchronized ReplicaInPipelineInterface createRbw(
+ StorageType storageType, ExtendedBlock b) throws IOException {
+ return createTemporary(storageType, b);
}
@Override // FsDatasetSpi
- public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
- throws IOException {
+ public synchronized ReplicaInPipelineInterface createTemporary(
+ StorageType storageType, ExtendedBlock b) throws IOException {
if (isValidBlock(b)) {
throw new ReplicaAlreadyExistsException("Block " + b +
" is valid, and cannot be written to.");
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Fri Jul 25 20:33:09 2014
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -531,7 +532,7 @@ public class TestBlockRecovery {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
- dn.data.createRbw(block);
+ dn.data.createRbw(StorageType.DEFAULT, block);
try {
dn.syncBlock(rBlock, initBlockRecords(dn));
fail("Sync should fail");
@@ -554,7 +555,8 @@ public class TestBlockRecovery {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
- ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
+ ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
+ StorageType.DEFAULT, block);
ReplicaOutputStreams streams = null;
try {
streams = replicaInfo.createStreams(true,
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Fri Jul 25 20:33:09 2014
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -264,7 +265,8 @@ public class TestBlockReplacement {
sock.setKeepAlive(true);
// sendRequest
DataOutputStream out = new DataOutputStream(sock.getOutputStream());
- new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
+ new Sender(out).replaceBlock(block, StorageType.DEFAULT,
+ BlockTokenSecretManager.DUMMY_TOKEN,
source.getDatanodeUuid(), sourceProxy);
out.flush();
// receiveResponse
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Jul 25 20:33:09 2014
@@ -46,9 +46,11 @@ import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -58,6 +60,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -307,7 +310,8 @@ public class TestDataNodeVolumeFailure {
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
- public Peer newConnectedPeer(InetSocketAddress addr)
+ public Peer newConnectedPeer(InetSocketAddress addr,
+ Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
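RemotePeerFactory.newConnectedPeer is similarly widened: implementations now receive the block access token and the target DatanodeID along with the address, presumably so that secure implementations can authenticate the connection, while plain ones like this test's simply ignore the extra arguments. A stand-in sketch of the widened callback (hypothetical minimal types, not the real Hadoop interfaces):

import java.io.IOException;
import java.net.InetSocketAddress;

public class WidenedFactorySketch {
  static class Token {}       // stand-in for Token<BlockTokenIdentifier>
  static class DatanodeID {}  // stand-in for the real DatanodeID
  interface Peer {}

  interface RemotePeerFactory {
    // The token and datanode identity ride along with the address; secure
    // implementations can use them, plain ones ignore them.
    Peer newConnectedPeer(InetSocketAddress addr, Token blockToken,
        DatanodeID datanodeId) throws IOException;
  }

  public static void main(String[] args) throws IOException {
    RemotePeerFactory factory = new RemotePeerFactory() {
      @Override
      public Peer newConnectedPeer(InetSocketAddress addr, Token blockToken,
          DatanodeID datanodeId) throws IOException {
        return new Peer() {};  // would open and wrap a socket to addr
      }
    };
    Peer peer = factory.newConnectedPeer(
        new InetSocketAddress("127.0.0.1", 1004), new Token(), new DatanodeID());
    System.out.println("connected: " + peer);
  }
}
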
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Jul 25 20:33:09 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -147,9 +148,9 @@ public class TestDiskError {
DataChecksum checksum = DataChecksum.newDataChecksum(
DataChecksum.Type.CRC32, 512);
- new Sender(out).writeBlock(block.getBlock(),
+ new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
BlockTokenSecretManager.DUMMY_TOKEN, "",
- new DatanodeInfo[0], null,
+ new DatanodeInfo[0], new StorageType[0], null,
BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
checksum, CachingStrategy.newDefaultStrategy());
out.flush();
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Fri Jul 25 20:33:09 2014
@@ -29,6 +29,7 @@ import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -65,7 +66,8 @@ public class TestSimulatedFSDataset {
ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
// we pass the expected length as zero; fsdataset should use the size of
// the actual data written
- ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
+ ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
+ StorageType.DEFAULT, b);
ReplicaOutputStreams out = bInfo.createStreams(true,
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
try {
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java Fri Jul 25 20:33:09 2014
@@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -147,7 +148,7 @@ public class TestWriteToReplica {
};
ReplicaMap replicasMap = dataSet.volumeMap;
- FsVolumeImpl vol = dataSet.volumes.getNextVolume(0);
+ FsVolumeImpl vol = dataSet.volumes.getNextVolume(StorageType.DEFAULT, 0);
ReplicaInfo replicaInfo = new FinalizedReplica(
blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
replicasMap.add(bpid, replicaInfo);
@@ -357,7 +358,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(blocks[FINALIZED]);
+ dataSet.createRbw(StorageType.DEFAULT, blocks[FINALIZED]);
Assert.fail("Should not have created a replica that's already " +
"finalized " + blocks[FINALIZED]);
} catch (ReplicaAlreadyExistsException e) {
@@ -375,7 +376,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(blocks[TEMPORARY]);
+ dataSet.createRbw(StorageType.DEFAULT, blocks[TEMPORARY]);
Assert.fail("Should not have created a replica that had created as " +
"temporary " + blocks[TEMPORARY]);
} catch (ReplicaAlreadyExistsException e) {
@@ -385,7 +386,7 @@ public class TestWriteToReplica {
0L, blocks[RBW].getNumBytes()); // expect to be successful
try {
- dataSet.createRbw(blocks[RBW]);
+ dataSet.createRbw(StorageType.DEFAULT, blocks[RBW]);
Assert.fail("Should not have created a replica that had created as RBW " +
blocks[RBW]);
} catch (ReplicaAlreadyExistsException e) {
@@ -401,7 +402,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(blocks[RWR]);
+ dataSet.createRbw(StorageType.DEFAULT, blocks[RWR]);
Assert.fail("Should not have created a replica that was waiting to be " +
"recovered " + blocks[RWR]);
} catch (ReplicaAlreadyExistsException e) {
@@ -417,7 +418,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(blocks[RUR]);
+ dataSet.createRbw(StorageType.DEFAULT, blocks[RUR]);
Assert.fail("Should not have created a replica that was under recovery " +
blocks[RUR]);
} catch (ReplicaAlreadyExistsException e) {
@@ -434,45 +435,45 @@ public class TestWriteToReplica {
e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
}
- dataSet.createRbw(blocks[NON_EXISTENT]);
+ dataSet.createRbw(StorageType.DEFAULT, blocks[NON_EXISTENT]);
}
private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
try {
- dataSet.createTemporary(blocks[FINALIZED]);
+ dataSet.createTemporary(StorageType.DEFAULT, blocks[FINALIZED]);
Assert.fail("Should not have created a temporary replica that was " +
"finalized " + blocks[FINALIZED]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(blocks[TEMPORARY]);
+ dataSet.createTemporary(StorageType.DEFAULT, blocks[TEMPORARY]);
Assert.fail("Should not have created a replica that had created as" +
"temporary " + blocks[TEMPORARY]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(blocks[RBW]);
+ dataSet.createTemporary(StorageType.DEFAULT, blocks[RBW]);
Assert.fail("Should not have created a replica that had created as RBW " +
blocks[RBW]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(blocks[RWR]);
+ dataSet.createTemporary(StorageType.DEFAULT, blocks[RWR]);
Assert.fail("Should not have created a replica that was waiting to be " +
"recovered " + blocks[RWR]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(blocks[RUR]);
+ dataSet.createTemporary(StorageType.DEFAULT, blocks[RUR]);
Assert.fail("Should not have created a replica that was under recovery " +
blocks[RUR]);
} catch (ReplicaAlreadyExistsException e) {
}
- dataSet.createTemporary(blocks[NON_EXISTENT]);
+ dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
}
}
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Fri Jul 25 20:33:09 2014
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.FileNotFoundException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import java.util.List;
@@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.permi
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.FsAction.READ;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.junit.After;
@@ -261,11 +261,12 @@ public class FSXAttrBaseTest {
fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
Assert.fail("Setting xattr with empty name should fail.");
+ } catch (RemoteException e) {
+ assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+ HadoopIllegalArgumentException.class.getCanonicalName());
+ GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
} catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
- } catch (IllegalArgumentException e) {
- GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " +
- "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
}
// Set xattr with invalid name: "a1"
@@ -274,11 +275,12 @@ public class FSXAttrBaseTest {
XAttrSetFlag.REPLACE));
Assert.fail("Setting xattr with invalid name prefix or without " +
"name prefix should fail.");
+ } catch (RemoteException e) {
+ assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+ HadoopIllegalArgumentException.class.getCanonicalName());
+ GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
} catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
- } catch (IllegalArgumentException e) {
- GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " +
- "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
}
// Set xattr without XAttrSetFlag
@@ -341,9 +343,18 @@ public class FSXAttrBaseTest {
}
/**
- * Tests for getting xattr
- * 1. To get xattr which does not exist.
- * 2. To get multiple xattrs.
+ * getxattr tests. Test that getxattr throws an exception if any of
+ * the following are true:
+ * an xattr that was requested doesn't exist
+ * the caller specifies an unknown namespace
+ * the caller doesn't have access to the namespace
+ * the caller doesn't have permission to get the value of the xattr
+ * the caller does not have search access to the parent directory
+ * the caller has only read access to the owning directory
+ * the caller has search access to the owning directory but only
+ * execute/search access, not read access, to the actual entity
+ * Search access to the owning directory plus read access to the actual
+ * entity is sufficient and is also verified here.
*/
@Test(timeout = 120000)
public void testGetXAttrs() throws Exception {
@@ -351,21 +362,159 @@ public class FSXAttrBaseTest {
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
- // XAttr does not exist.
- byte[] value = fs.getXAttr(path, name3);
- Assert.assertEquals(value, null);
-
- List<String> names = Lists.newArrayList();
- names.add(name1);
- names.add(name2);
- names.add(name3);
- Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
- Assert.assertEquals(xattrs.size(), 2);
- Assert.assertArrayEquals(value1, xattrs.get(name1));
- Assert.assertArrayEquals(value2, xattrs.get(name2));
+ /* An XAttr that was requested does not exist. */
+ try {
+ final byte[] value = fs.getXAttr(path, name3);
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains(
+ "At least one of the attributes provided was not found.", e);
+ }
+
+ /* Throw an exception if an xattr that was requested does not exist. */
+ {
+ final List<String> names = Lists.newArrayList();
+ names.add(name1);
+ names.add(name2);
+ names.add(name3);
+ try {
+ final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains(
+ "At least one of the attributes provided was not found.", e);
+ }
+ }
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
+
+ /* Unknown namespace should throw an exception. */
+ try {
+ final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
+ Assert.fail("expected IOException");
+ } catch (Exception e) {
+ GenericTestUtils.assertExceptionContains
+ ("An XAttr name must be prefixed with user/trusted/security/system, " +
+ "followed by a '.'",
+ e);
+ }
+
+ /*
+ * The 'trusted' namespace should not be accessible and should throw an
+ * exception.
+ */
+ final UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+ }
+
+ fs.setXAttr(path, name1, "1234".getBytes());
+
+ /*
+ * Test that an exception is thrown if the caller doesn't have permission to
+ * get the value of the xattr.
+ */
+
+ /* Set access so that only the owner has access. */
+ fs.setPermission(path, new FsPermission((short) 0700));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(path, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * The caller must have search access to the parent directory.
+ */
+ final Path childDir = new Path(path, "child" + pathCount);
+ /* Set access to parent so that only the owner has access. */
+ FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+ fs.setXAttr(childDir, name1, "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /* Check that read access to the owning directory is not good enough. */
+ fs.setPermission(path, new FsPermission((short) 0704));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that search access to the owning directory and search/execute
+ * access to the actual entity with extended attributes is not good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0701));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that search access to the owning directory and read access to
+ * the actual entity with the extended attribute is good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0704));
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ final byte[] xattr = userFs.getXAttr(childDir, name1);
+ return null;
+ }
+ });
}
/**
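The octal modes threaded through the test above encode exactly the access combinations under discussion: 0700 is rwx for the owner only, 0704 adds read (r--) for others, and 0701 adds execute/search (--x) for others. Reading an xattr on childDir needs search (x) on the parent plus read (r) on childDir itself, which is why the 0701/0704 combination succeeds while 0704 on the parent, or 0701/0701, fails. A small decoder for these modes (plain Java):

public class ModeBitsSketch {
  static String rwx(int bits) {
    return ((bits & 4) != 0 ? "r" : "-")
         + ((bits & 2) != 0 ? "w" : "-")
         + ((bits & 1) != 0 ? "x" : "-");
  }

  public static void main(String[] args) {
    for (int mode : new int[] {0700, 0701, 0704}) {
      // owner / group / other triplets, e.g. 0701 -> rwx-----x
      System.out.printf("%04o = %s%s%s%n", mode,
          rwx((mode >> 6) & 7), rwx((mode >> 3) & 7), rwx(mode & 7));
    }
  }
}
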
@@ -402,6 +551,166 @@ public class FSXAttrBaseTest {
fs.removeXAttr(path, name3);
}
+ /**
+ * removexattr tests. Test that removexattr throws an exception if any of
+ * the following are true:
+ * an xattr that was requested doesn't exist
+ * the caller specifies an unknown namespace
+ * the caller doesn't have access to the namespace
+ * the caller doesn't have permission to remove the xattr
+ * the caller does not have "execute" (scan) access to the parent directory
+ * the caller has only read access to the owning directory
+ * the caller has execute access to the owning directory but only
+ * execute access, not write access, to the actual entity
+ * Execute access to the owning directory plus write access to the actual
+ * entity is sufficient and is also verified here.
+ */
+ @Test(timeout = 120000)
+ public void testRemoveXAttrPermissions() throws Exception {
+ FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+ fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+ fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+ fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
+
+ /* Removing an xattr that has already been removed should fail. */
+ try {
+ fs.removeXAttr(path, name2);
+ fs.removeXAttr(path, name2);
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("No matching attributes found", e);
+ }
+
+ /* Unknown namespace should throw an exception. */
+ final String expectedExceptionString = "An XAttr name must be prefixed " +
+ "with user/trusted/security/system, followed by a '.'";
+ try {
+ fs.removeXAttr(path, "wackynamespace.foo");
+ Assert.fail("expected IOException");
+ } catch (RemoteException e) {
+ assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+ HadoopIllegalArgumentException.class.getCanonicalName());
+ GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+ } catch (HadoopIllegalArgumentException e) {
+ GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+ }
+
+ /*
+ * The 'trusted' namespace should not be accessible and should throw an
+ * exception.
+ */
+ final UserGroupInformation user = UserGroupInformation.
+ createUserForTesting("user", new String[] {"mygroup"});
+ fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(path, "trusted.foo");
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+ } finally {
+ fs.removeXAttr(path, "trusted.foo");
+ }
+
+ /*
+ * Test that an exception is thrown if the caller doesn't have permission to
+ * remove the xattr.
+ */
+
+ /* Set access so that only the owner has access. */
+ fs.setPermission(path, new FsPermission((short) 0700));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(path, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * The caller must have "execute" (scan) access to the parent directory.
+ */
+ final Path childDir = new Path(path, "child" + pathCount);
+ /* Set access to parent so that only the owner has access. */
+ FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+ fs.setXAttr(childDir, name1, "1234".getBytes());
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /* Check that read access to the owning directory is not good enough. */
+ fs.setPermission(path, new FsPermission((short) 0704));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that execute access to the owning directory and scan access to
+ * the actual entity with extended attributes is not good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0701));
+ try {
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ Assert.fail("expected IOException");
+ } catch (IOException e) {
+ GenericTestUtils.assertExceptionContains("Permission denied", e);
+ }
+
+ /*
+ * Check that execute access to the owning directory and write access to
+ * the actual entity with extended attributes is good enough.
+ */
+ fs.setPermission(path, new FsPermission((short) 0701));
+ fs.setPermission(childDir, new FsPermission((short) 0706));
+ user.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ final FileSystem userFs = dfsCluster.getFileSystem();
+ userFs.removeXAttr(childDir, name1);
+ return null;
+ }
+ });
+ }
+
@Test(timeout = 120000)
public void testRenameFileWithXAttr() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java Fri Jul 25 20:33:09 2014
@@ -18,32 +18,41 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
-import java.util.ArrayList;
-import java.util.Random;
-import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Random;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.Test;
+import org.apache.log4j.Level;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
+import org.junit.Test;
public class TestFavoredNodesEndToEnd {
+ {
+ ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)).getLogger().setLevel(Level.ALL);
+ }
+
private static MiniDFSCluster cluster;
private static Configuration conf;
private final static int NUM_DATA_NODES = 10;
@@ -79,7 +88,7 @@ public class TestFavoredNodesEndToEnd {
InetSocketAddress datanode[] = getDatanodes(rand);
Path p = new Path("/filename"+i);
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
- 4096, (short)3, (long)4096, null, datanode);
+ 4096, (short)3, 4096L, null, datanode);
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations = getBlockLocations(p);
@@ -98,14 +107,13 @@ public class TestFavoredNodesEndToEnd {
//get some other nodes. In other words, the write to hdfs should not fail
//and if we do getBlockLocations on the file, we should see one blklocation
//and three hosts for that
- Random rand = new Random(System.currentTimeMillis());
InetSocketAddress arbitraryAddrs[] = new InetSocketAddress[3];
for (int i = 0; i < 3; i++) {
arbitraryAddrs[i] = getArbitraryLocalHostAddr();
}
Path p = new Path("/filename-foo-bar");
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
- 4096, (short)3, (long)4096, null, arbitraryAddrs);
+ 4096, (short)3, 4096L, null, arbitraryAddrs);
out.write(SOME_BYTES);
out.close();
getBlockLocations(p);
@@ -113,35 +121,41 @@ public class TestFavoredNodesEndToEnd {
@Test(timeout=180000)
public void testWhenSomeNodesAreNotGood() throws Exception {
+ // 4 favored nodes
+ final InetSocketAddress addrs[] = new InetSocketAddress[4];
+ final String[] hosts = new String[addrs.length];
+ for (int i = 0; i < addrs.length; i++) {
+ addrs[i] = datanodes.get(i).getXferAddress();
+ hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
+ }
+
//make some datanode not "good" so that even if the client prefers it,
//the namenode would not give it as a replica to write to
DatanodeInfo d = cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getDatanodeByXferAddr(
- datanodes.get(0).getXferAddress().getAddress().getHostAddress(),
- datanodes.get(0).getXferAddress().getPort());
+ addrs[0].getAddress().getHostAddress(), addrs[0].getPort());
//set the decommission status to true so that
//BlockPlacementPolicyDefault.isGoodTarget returns false for this dn
d.setDecommissioned();
- InetSocketAddress addrs[] = new InetSocketAddress[3];
- for (int i = 0; i < 3; i++) {
- addrs[i] = datanodes.get(i).getXferAddress();
- }
Path p = new Path("/filename-foo-bar-baz");
+ final short replication = (short)3;
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
- 4096, (short)3, (long)4096, null, addrs);
+ 4096, replication, 4096L, null, addrs);
out.write(SOME_BYTES);
out.close();
//reset the state
d.stopDecommission();
+
BlockLocation[] locations = getBlockLocations(p);
+ Assert.assertEquals(replication, locations[0].getNames().length);
//also make sure that the datanode[0] is not in the list of hosts
- String datanode0 =
- datanodes.get(0).getXferAddress().getAddress().getHostAddress()
- + ":" + datanodes.get(0).getXferAddress().getPort();
- for (int i = 0; i < 3; i++) {
- if (locations[0].getNames()[i].equals(datanode0)) {
- fail(datanode0 + " not supposed to be a replica for the block");
- }
+ for (int i = 0; i < replication; i++) {
+ final String loc = locations[0].getNames()[i];
+ int j = 0;
+ for(; j < hosts.length && !loc.equals(hosts[j]); j++);
+ Assert.assertTrue("j=" + j, j > 0);
+ Assert.assertTrue("loc=" + loc + " not in host list "
+ + Arrays.asList(hosts) + ", j=" + j, j < hosts.length);
}
}
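The rewritten assertion loop is a linear membership scan: for each reported replica location it walks the favored-host list to find the matching index j, then requires j > 0 (hosts[0] is the decommissioned favored node and must not appear) and j < hosts.length (the location must be one of the favored nodes at all). The same check reads more directly as an index lookup; a sketch with hypothetical addresses:

import java.util.Arrays;
import java.util.List;

public class FavoredHostCheckSketch {
  public static void main(String[] args) {
    // hosts[0] is the decommissioned favored node; addresses are hypothetical.
    List<String> hosts = Arrays.asList("h0:1004", "h1:1004", "h2:1004", "h3:1004");
    String[] reportedLocations = {"h1:1004", "h2:1004", "h3:1004"};
    for (String loc : reportedLocations) {
      int j = hosts.indexOf(loc);  // -1 if loc is not a favored node at all
      if (j < 0) {
        throw new AssertionError(loc + " is not in the favored host list");
      }
      if (j == 0) {
        throw new AssertionError(loc + " is the decommissioned favored node");
      }
    }
    System.out.println("all replicas landed on favored, in-service nodes");
  }
}
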
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java Fri Jul 25 20:33:09 2014
@@ -212,18 +212,25 @@ public class TestNNStorageRetentionManag
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
+ // Segments containing txns up to txId 250 are extra and should be purged.
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(1, 100), true);
- // Without lowering the max segments to retain, we'd retain all segments
- // going back to txid 150 (300 - 150).
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 175), true);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(176) + ".empty",
+ true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(176, 200), true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 225), true);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(226) + ".corrupt",
+ true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(226, 240), true);
// Only retain 2 extra segments. The 301-350 and 351-400 segments are
// considered required, not extra.
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(241, 275), false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(276, 300), false);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(301) + ".empty",
+ false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 350), false);
+ tc.addLog("/foo2/current/" + getInProgressEditsFileName(351) + ".corrupt",
+ false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(351, 400), false);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false);
runTest(tc);
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Fri Jul 25 20:33:09 2014
@@ -18,9 +18,11 @@
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.junit.Assert.*;
import java.io.File;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -28,12 +30,21 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
@@ -153,4 +164,177 @@ public class TestNamenodeCapacityReport
if (cluster != null) {cluster.shutdown();}
}
}
+
+ private static final float EPSILON = 0.0001f;
+ @Test
+ public void testXceiverCount() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ // don't waste time retrying if close fails
+ conf.setInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 0);
+ MiniDFSCluster cluster = null;
+
+ final int nodes = 8;
+ final int fileCount = 5;
+ final short fileRepl = 3;
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
+ cluster.waitActive();
+
+ final FSNamesystem namesystem = cluster.getNamesystem();
+ final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
+ List<DataNode> datanodes = cluster.getDataNodes();
+ final DistributedFileSystem fs = cluster.getFileSystem();
+
+ // trigger heartbeats in case not already sent
+ triggerHeartbeats(datanodes);
+
+ // check that all nodes are live and in service
+ int expectedTotalLoad = nodes; // xceiver server adds 1 to load
+ int expectedInServiceNodes = nodes;
+ int expectedInServiceLoad = nodes;
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // shutdown half the nodes and force a heartbeat check to ensure
+ // counts are accurate
+ for (int i=0; i < nodes/2; i++) {
+ DataNode dn = datanodes.get(i);
+ DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
+ dn.shutdown();
+ dnd.setLastUpdate(0L);
+ BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+ expectedInServiceNodes--;
+ assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ }
+
+ // restart the nodes to verify that counts are correct after
+ // node re-registration
+ cluster.restartDataNodes();
+ cluster.waitActive();
+ datanodes = cluster.getDataNodes();
+ expectedInServiceNodes = nodes;
+ assertEquals(nodes, datanodes.size());
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // create streams and hsync to force datastreamers to start
+ DFSOutputStream[] streams = new DFSOutputStream[fileCount];
+ for (int i=0; i < fileCount; i++) {
+ streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
+ .getWrappedStream();
+ streams[i].write("1".getBytes());
+ streams[i].hsync();
+ // the load for writers is 2 because both the write xceiver & packet
+ // responder threads are counted in the load
+ expectedTotalLoad += 2*fileRepl;
+ expectedInServiceLoad += 2*fileRepl;
+ }
+ // force nodes to send load update
+ triggerHeartbeats(datanodes);
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+
+ // decomm a few nodes, subtract their load from the expected load,
+ // trigger heartbeat to force load update
+ for (int i=0; i < fileRepl; i++) {
+ expectedInServiceNodes--;
+ DatanodeDescriptor dnd =
+ dnm.getDatanode(datanodes.get(i).getDatanodeId());
+ expectedInServiceLoad -= dnd.getXceiverCount();
+ dnm.startDecommission(dnd);
+ DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
+ Thread.sleep(100);
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // check the expected load while closing each stream, recalculating the
+ // expected load based on whether the nodes in the pipeline are decommissioned
+ for (int i=0; i < fileCount; i++) {
+ int decomm = 0;
+ for (DatanodeInfo dni : streams[i].getPipeline()) {
+ DatanodeDescriptor dnd = dnm.getDatanode(dni);
+ expectedTotalLoad -= 2;
+ if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
+ decomm++;
+ } else {
+ expectedInServiceLoad -= 2;
+ }
+ }
+ try {
+ streams[i].close();
+ } catch (IOException ioe) {
+ // nodes can finish decommissioning even if there's an under-construction
+ // block whose other locations are decommissioned too. we'll ignore
+ // that bug for now
+ if (decomm < fileRepl) {
+ throw ioe;
+ }
+ }
+ triggerHeartbeats(datanodes);
+ // verify node count and loads
+ assertEquals(nodes, namesystem.getNumLiveDataNodes());
+ assertEquals(expectedInServiceNodes,
+ namesystem.getNumDatanodesInService());
+ assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
+ assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // shut down each node and verify node counts based on decommission state
+ for (int i=0; i < nodes; i++) {
+ DataNode dn = datanodes.get(i);
+ dn.shutdown();
+ // force it to appear dead so live count decreases
+ DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
+ dnDesc.setLastUpdate(0L);
+ BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+ assertEquals(nodes-1-i, namesystem.getNumLiveDataNodes());
+ // first few nodes are already out of service
+ if (i >= fileRepl) {
+ expectedInServiceNodes--;
+ }
+ assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
+
+ // live nodes always report a load of 1; with no live nodes the load is 0
+ double expectedXceiverAvg = (i == nodes-1) ? 0.0 : 1.0;
+ assertEquals((double)expectedXceiverAvg,
+ namesystem.getInServiceXceiverAverage(), EPSILON);
+ }
+
+ // final sanity check
+ assertEquals(0, namesystem.getNumLiveDataNodes());
+ assertEquals(0, namesystem.getNumDatanodesInService());
+ assertEquals(0.0, namesystem.getTotalLoad(), EPSILON);
+ assertEquals(0.0, namesystem.getInServiceXceiverAverage(), EPSILON);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private void triggerHeartbeats(List<DataNode> datanodes)
+ throws IOException, InterruptedException {
+ for (DataNode dn : datanodes) {
+ DataNodeTestUtils.triggerHeartbeat(dn);
+ }
+ Thread.sleep(100);
+ }
}
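
All of the assertions above reduce to one invariant: getTotalLoad() sums xceiver counts across every live datanode, while getInServiceXceiverAverage() divides the in-service load by the number of in-service (live, non-decommissioned) nodes only. A minimal stand-alone sketch of that calculation, with illustrative names rather than the real FSNamesystem code:

    import java.util.Arrays;
    import java.util.List;

    class XceiverAverageSketch {
      static class Node {
        final int xceiverCount;       // current load reported via heartbeat
        final boolean decommissioned;
        Node(int xceiverCount, boolean decommissioned) {
          this.xceiverCount = xceiverCount;
          this.decommissioned = decommissioned;
        }
      }

      // Average load over in-service nodes only; decommissioning a node
      // removes it from the divisor, so the average goes up, exactly as
      // the decommission loop in the test expects.
      static double inServiceXceiverAverage(List<Node> liveNodes) {
        int load = 0, inService = 0;
        for (Node n : liveNodes) {
          if (!n.decommissioned) {
            load += n.xceiverCount;
            inService++;
          }
        }
        return inService == 0 ? 0.0 : (double) load / inService;
      }

      public static void main(String[] args) {
        List<Node> nodes = Arrays.asList(
            new Node(2, false), new Node(4, false), new Node(4, true));
        System.out.println(inServiceXceiverAverage(nodes)); // prints 3.0
      }
    }
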
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Fri Jul 25 20:33:09 2014
@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
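
For context, the expected cache size grows from 22 to 23 because each @AtMostOnce RPC issued by the test leaves exactly one retry-cache entry keyed by (clientId, callId), and this commit adds removeXAttr to that set. A toy sketch of the dedup idea, not the LightWeightCache implementation:

    import java.util.HashMap;
    import java.util.Map;

    class RetryCacheSketch {
      private final Map<String, Object> entries = new HashMap<String, Object>();

      // First attempt records the outcome; a retried call with the same
      // (clientId, callId) replays the cached response instead of
      // re-executing the operation.
      Object invoke(String clientId, int callId, Object freshResult) {
        String key = clientId + ":" + callId;
        if (entries.containsKey(key)) {
          return entries.get(key);
        }
        entries.put(key, freshResult);
        return freshResult;
      }

      int size() { return entries.size(); } // grows by one per distinct op
    }
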
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java Fri Jul 25 20:33:09 2014
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertEqu
import static org.junit.Assert.assertTrue;
import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
@@ -30,11 +32,15 @@ import org.apache.hadoop.hdfs.server.com
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
/**
* This class tests various upgrade cases from earlier versions to current
* version with and without clusterid.
*/
+@RunWith(value = Parameterized.class)
public class TestStartupOptionUpgrade {
private Configuration conf;
@@ -42,10 +48,21 @@ public class TestStartupOptionUpgrade {
private int layoutVersion;
NNStorage storage;
+ @Parameters
+ public static Collection<Object[]> startOption() {
+ Object[][] params = new Object[][] { { StartupOption.UPGRADE },
+ { StartupOption.UPGRADEONLY } };
+ return Arrays.asList(params);
+ }
+
+ public TestStartupOptionUpgrade(StartupOption startOption) {
+ super();
+ this.startOpt = startOption;
+ }
+
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
- startOpt = StartupOption.UPGRADE;
startOpt.setClusterId(null);
storage = new NNStorage(conf,
Collections.<URI>emptyList(),
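
The Parameterized runner instantiates the test class once per row returned by the @Parameters method, so every existing @Test now runs under both UPGRADE and UPGRADEONLY without being duplicated. A self-contained sketch of the pattern, with plain strings standing in for StartupOption:

    import static org.junit.Assert.assertTrue;

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class ParameterizedSketchTest {
      private final String option;

      public ParameterizedSketchTest(String option) {
        this.option = option;
      }

      // One Object[] row per instantiation of the test class.
      @Parameters
      public static Collection<Object[]> options() {
        return Arrays.asList(new Object[][] {
            { "-upgrade" }, { "-upgradeOnly" } });
      }

      @Test
      public void testOptionIsRecognized() {
        assertTrue(option.startsWith("-upgrade")); // runs once per row
      }
    }
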
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Fri Jul 25 20:33:09 2014
@@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -585,15 +585,14 @@ public class TestDNFencing {
}
@Override
- public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
+ public DatanodeStorageInfo chooseReplicaToDelete(BlockCollection inode,
Block block, short replicationFactor,
- Collection<DatanodeDescriptor> first,
- Collection<DatanodeDescriptor> second) {
+ Collection<DatanodeStorageInfo> first,
+ Collection<DatanodeStorageInfo> second) {
- Collection<DatanodeDescriptor> chooseFrom =
- !first.isEmpty() ? first : second;
+ Collection<DatanodeStorageInfo> chooseFrom = !first.isEmpty() ? first : second;
- List<DatanodeDescriptor> l = Lists.newArrayList(chooseFrom);
+ List<DatanodeStorageInfo> l = Lists.newArrayList(chooseFrom);
return l.get(DFSUtil.getRandom().nextInt(l.size()));
}
}
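
The signature change follows the heterogeneous-storage work: replicas are now tracked per DatanodeStorageInfo (one storage on a node) rather than per DatanodeDescriptor (the node itself), so deletion candidates are storages too. The overridden policy's selection logic itself is unchanged; stripped of the HDFS types it is just:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import java.util.Random;

    class RandomDeletionChoiceSketch {
      private static final Random RANDOM = new Random();

      // Prefer the first candidate set when non-empty, else fall back to
      // the second, then pick uniformly at random -- the same logic as the
      // test's chooseReplicaToDelete, minus the HDFS types.
      static <T> T choose(Collection<T> first, Collection<T> second) {
        Collection<T> chooseFrom = !first.isEmpty() ? first : second;
        List<T> l = new ArrayList<T>(chooseFrom);
        return l.get(RANDOM.nextInt(l.size()));
      }
    }
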
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Fri Jul 25 20:33:09 2014
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -299,7 +300,8 @@ public class TestDelegationTokensWithHA
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
URI haUri = new URI("hdfs://my-ha-uri/");
- token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
+ token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
+ HdfsConstants.HDFS_URI_SCHEME));
ugi.addToken(token);
Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
@@ -355,7 +357,8 @@ public class TestDelegationTokensWithHA
@Test
public void testDFSGetCanonicalServiceName() throws Exception {
URI hAUri = HATestUtil.getLogicalUri(cluster);
- String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+ String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
+ HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, dfs.getCanonicalServiceName());
final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
final Token<DelegationTokenIdentifier> token =
@@ -371,7 +374,8 @@ public class TestDelegationTokensWithHA
Configuration conf = dfs.getConf();
URI haUri = HATestUtil.getLogicalUri(cluster);
AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
- String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+ String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+ HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, afs.getCanonicalServiceName());
Token<?> token = afs.getDelegationTokens(
UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
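
buildTokenServiceForLogicalUri now takes the URI scheme explicitly, so tokens minted for different schemes over the same logical authority get distinct service names. The exact service format is HAUtil's concern; the idea, sketched with a hypothetical helper:

    import java.net.URI;

    class TokenServiceSketch {
      // Hypothetical helper, not the HAUtil API: qualify the logical
      // authority with the scheme so e.g. hdfs and webhdfs tokens for the
      // same nameservice cannot be confused with one another.
      static String buildTokenService(URI logicalUri, String scheme) {
        return scheme + "://" + logicalUri.getAuthority();
      }

      public static void main(String[] args) throws Exception {
        System.out.println(buildTokenService(new URI("hdfs://my-ha-uri/"), "hdfs"));
        // prints hdfs://my-ha-uri
      }
    }
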
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Fri Jul 25 20:33:09 2014
@@ -160,7 +160,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn0 = cluster.getNamesystem(0);
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -181,7 +181,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn1 = cluster.getNamesystem(1);
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
.getRetryCache().getCacheSet();
- assertEquals(22, cacheSet.size());
+ assertEquals(23, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
@@ -1047,6 +1047,49 @@ public class TestRetryCacheWithHA {
}
}
+ /** removeXAttr */
+ class RemoveXAttrOp extends AtMostOnceOp {
+ private final String src;
+
+ RemoveXAttrOp(DFSClient client, String src) {
+ super("removeXAttr", client);
+ this.src = src;
+ }
+
+ @Override
+ void prepare() throws Exception {
+ Path p = new Path(src);
+ if (!dfs.exists(p)) {
+ DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+ client.setXAttr(src, "user.key", "value".getBytes(),
+ EnumSet.of(XAttrSetFlag.CREATE));
+ }
+ }
+
+ @Override
+ void invoke() throws Exception {
+ client.removeXAttr(src, "user.key");
+ }
+
+ @Override
+ boolean checkNamenodeBeforeReturn() throws Exception {
+ for (int i = 0; i < CHECKTIMES; i++) {
+ Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
+ Set<String> keySet = iter.keySet();
+ if (!keySet.contains("user.key")) {
+ return true;
+ }
+ Thread.sleep(1000);
+ }
+ return false;
+ }
+
+ @Override
+ Object getResult() {
+ return null;
+ }
+ }
+
@Test (timeout=60000)
public void testCreateSnapshot() throws Exception {
final DFSClient client = genClientWithDummyHandler();
@@ -1183,6 +1226,13 @@ public class TestRetryCacheWithHA {
testClientRetryWithFailover(op);
}
+ @Test (timeout=60000)
+ public void testRemoveXAttr() throws Exception {
+ DFSClient client = genClientWithDummyHandler();
+ AtMostOnceOp op = new RemoveXAttrOp(client, "/removexattr");
+ testClientRetryWithFailover(op);
+ }
+
/**
* When NN failover happens, if the client did not receive the response and
* sent a retry request to the other NN, the same response should be received
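
RemoveXAttrOp plugs into the harness's prepare/invoke/verify lifecycle: prepare() creates a file carrying user.key, invoke() issues the removeXAttr RPC (possibly straddling a failover), and checkNamenodeBeforeReturn() polls until the xattr is gone. A compressed sketch of the contract being exercised, with an illustrative driver in place of testClientRetryWithFailover:

    abstract class AtMostOnceOpSketch {
      abstract void prepare() throws Exception;  // establish preconditions
      abstract void invoke() throws Exception;   // the RPC under test
      abstract boolean checkNamenodeBeforeReturn() throws Exception;

      // Illustrative driver only: the client's retry after a failover must
      // be answered from the retry cache, so the operation takes effect
      // exactly once even though invoke() fires twice.
      final void run() throws Exception {
        prepare();
        invoke();   // original attempt; response may be lost to failover
        invoke();   // client retry; the new active NN replays the result
        if (!checkNamenodeBeforeReturn()) {
          throw new AssertionError("operation did not take effect");
        }
      }
    }
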
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java Fri Jul 25 20:33:09 2014
@@ -92,7 +92,7 @@ public class TestWebHdfsDataLocality {
//The chosen datanode must be the same as the client address
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, PutOpParam.Op.CREATE, -1L, blocksize);
+ namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null);
Assert.assertEquals(ipAddr, chosen.getIpAddr());
}
}
@@ -117,23 +117,104 @@ public class TestWebHdfsDataLocality {
{ //test GETFILECHECKSUM
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize);
+ namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test OPEN
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, GetOpParam.Op.OPEN, 0, blocksize);
+ namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test APPEND
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, PostOpParam.Op.APPEND, -1L, blocksize);
+ namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
} finally {
cluster.shutdown();
}
}
+
+ @Test
+ public void testExcludeDataNodes() throws Exception {
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
+ final String[] hosts = {"DataNode1", "DataNode2", "DataNode3", "DataNode4", "DataNode5", "DataNode6"};
+ final int nDataNodes = hosts.length;
+ LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks)
+ + ", hosts=" + Arrays.asList(hosts));
+
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
+
+ try {
+ cluster.waitActive();
+
+ final DistributedFileSystem dfs = cluster.getFileSystem();
+ final NameNode namenode = cluster.getNameNode();
+ final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
+ LOG.info("dm=" + dm);
+
+ final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+ final String f = "/foo";
+
+ // create a file with three replicas.
+ final Path p = new Path(f);
+ final FSDataOutputStream out = dfs.create(p, (short)3);
+ out.write(1);
+ out.close();
+
+ // get the replica locations.
+ final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
+ namenode, f, 0, 1);
+ final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
+ Assert.assertEquals(1, lb.size());
+ final DatanodeInfo[] locations = lb.get(0).getLocations();
+ Assert.assertEquals(3, locations.length);
+
+
+ // For GETFILECHECKSUM, OPEN and APPEND, the chosen datanode
+ // must not be one of the excluded nodes.
+
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < 2; i++) {
+ sb.append(locations[i].getXferAddr());
+ { // test GETFILECHECKSUM
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+ namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize,
+ sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ { // test OPEN
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+ namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ { // test APPEND
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods
+ .chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L,
+ blocksize, sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ sb.append(",");
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
}
\ No newline at end of file
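
The new trailing argument to chooseDatanode is a comma-separated list of datanode transfer addresses to exclude; the loop above grows it one address at a time, appending the separator only after the current prefix has been used. A small helper of the kind one might write (hypothetical, not a WebHDFS API):

    import java.util.Arrays;
    import java.util.List;

    class ExcludeListSketch {
      static String toExcludeParam(List<String> xferAddrs) {
        StringBuilder sb = new StringBuilder();
        for (String addr : xferAddrs) {
          if (sb.length() > 0) {
            sb.append(',');   // comma-separated, no trailing separator
          }
          sb.append(addr);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(toExcludeParam(
            Arrays.asList("127.0.0.1:50010", "127.0.0.1:50011")));
        // prints 127.0.0.1:50010,127.0.0.1:50011
      }
    }
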
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java Fri Jul 25 20:33:09 2014
@@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import com.google.common.base.Charsets;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -46,6 +47,7 @@ public class TestDFSAdminWithHA {
private PrintStream originErr;
private static final String NSID = "ns1";
+ private static String newLine = System.getProperty("line.separator");
private void assertOutputMatches(String string) {
String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
@@ -99,6 +101,14 @@ public class TestDFSAdminWithHA {
System.err.flush();
System.setOut(originOut);
System.setErr(originErr);
+ if (admin != null) {
+ admin.close();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ out.reset();
+ err.reset();
}
@Test(timeout = 30000)
@@ -108,25 +118,25 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Leave safemode
exitCode = admin.run(new String[] {"-safemode", "leave"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -136,12 +146,12 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-saveNamespace"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Save namespace successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -151,17 +161,17 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "restoreFailedStorage is set to false for.*";
// Default is false
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to true for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to false for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -170,7 +180,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshNodes"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh nodes successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -179,7 +189,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Balancer bandwidth is set to 10 for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -189,7 +199,7 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -198,7 +208,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh service acl successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -207,7 +217,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh user to groups mapping successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -217,7 +227,7 @@ public class TestDFSAdminWithHA {
new String[] {"-refreshSuperUserGroupsConfiguration"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh super user groups configuration successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -226,6 +236,6 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshCallQueue"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh call queue successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
}
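
The "\n"-to-newLine substitution makes these regex assertions portable: on Windows the captured stream ends lines with "\r\n", so a pattern built with a literal "\n" never matches. A minimal illustration:

    class LineSeparatorSketch {
      public static void main(String[] args) {
        String newLine = System.getProperty("line.separator");
        // Pattern and output are built with the same platform separator,
        // so the full-string match succeeds on Unix and Windows alike.
        String pattern = "Safe mode is ON in.*" + newLine
            + "Safe mode is ON in.*" + newLine;
        String output = "Safe mode is ON in ns1" + newLine
            + "Safe mode is ON in ns1" + newLine;
        System.out.println(output.matches(pattern)); // true on any platform
      }
    }
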