Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2013/10/29 01:49:23 UTC
svn commit: r1536572 [4/4] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ dev-support/
src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/client/
src/main/java/org/apache/hadoop/hdfs/p...
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Tue Oct 29 00:49:20 2013
@@ -133,7 +133,7 @@ public class TestReplicationPolicyWithNo
for(int i=0; i<NUM_OF_DATANODES; i++) {
dataNodes[i].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
}
@@ -199,7 +199,8 @@ public class TestReplicationPolicyWithNo
public void testChooseTarget1() throws Exception {
dataNodes[0].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
+ HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 0L, 0L, 4, 0); // overloaded
DatanodeDescriptor[] targets;
targets = chooseTarget(0);
@@ -232,7 +233,7 @@ public class TestReplicationPolicyWithNo
dataNodes[0].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
private void verifyNoTwoTargetsOnSameNodeGroup(DatanodeDescriptor[] targets) {
@@ -299,7 +300,8 @@ public class TestReplicationPolicyWithNo
// make data node 0 to be not qualified to choose
dataNodes[0].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
+ (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
+ 0L, 0L, 0, 0); // no space
DatanodeDescriptor[] targets;
targets = chooseTarget(0);
@@ -330,7 +332,7 @@ public class TestReplicationPolicyWithNo
dataNodes[0].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
/**
@@ -348,7 +350,7 @@ public class TestReplicationPolicyWithNo
for(int i=0; i<3; i++) {
dataNodes[i].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
+ (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeDescriptor[] targets;
@@ -576,11 +578,11 @@ public class TestReplicationPolicyWithNo
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
dataNodes[0].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
+ (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
dataNodesInBoundaryCase[i].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeDescriptor[] targets;
@@ -611,7 +613,7 @@ public class TestReplicationPolicyWithNo
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
dataNodesInBoundaryCase[i].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
chosenNodes.add(dataNodesInBoundaryCase[0]);
@@ -651,7 +653,7 @@ public class TestReplicationPolicyWithNo
for(int i=0; i<NUM_OF_DATANODES_MORE_TARGETS; i++) {
dataNodesInMoreTargetsCase[i].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeDescriptor[] targets;
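Every hunk above follows the same pattern: DatanodeDescriptor.updateHeartbeat() gains two new long arguments between the existing four longs and the two trailing ints. Judging by the rest of this commit they carry the datanode's cache capacity and cache usage, though those names are an assumption here. A minimal helper sketch (hypothetical, not part of this commit) that a test could use to avoid repeating the widened argument list:

    // Hypothetical helper; the cacheCapacity/cacheUsed meaning of the two
    // new long arguments is assumed from the rest of this commit.
    private static void resetHeartbeat(DatanodeDescriptor dn, long capacity,
        long dfsUsed, long remaining, long blockPoolUsed) {
      // Zero cache fields keep the existing placement expectations intact.
      dn.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
          0L, 0L, 0, 0);
    }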
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Tue Oct 29 00:49:20 2013
@@ -452,9 +452,9 @@ public class TestJspHelper {
DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "storage2",
1235, 2346, 3457, 4568);
DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1", 1024,
- 100, 924, 100, 10, 2);
+ 100, 924, 100, 5l, 3l, 10, 2);
DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2", 2500,
- 200, 1848, 200, 20, 1);
+ 200, 1848, 200, 10l, 2l, 20, 1);
ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
live.add(dnDesc1);
live.add(dnDesc2);
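The DatanodeDescriptor constructor is widened the same way: two longs (apparently cache capacity and cache used; 5l/3l and 10l/2l above) slot in before the xceiver and failed-volume counts. A hedged sketch of a third descriptor built with the new shape, with the assumed meaning of each position commented:

    DatanodeID dnId3 = new DatanodeID("127.0.0.3", "localhost3", "storage3",
        1236, 2347, 3458, 4569);
    DatanodeDescriptor dnDesc3 = new DatanodeDescriptor(dnId3, "rack3",
        2048,  // capacity
        150,   // dfsUsed
        1700,  // remaining
        150,   // blockPoolUsed
        8L,    // cache capacity (assumed meaning)
        4L,    // cache used (assumed meaning)
        15,    // xceiverCount
        1);    // failed volumes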
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Oct 29 00:49:20 2013
@@ -24,6 +24,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
@@ -465,6 +466,11 @@ public class SimulatedFSDataset implemen
return new BlockListAsLongs(blocks, null);
}
+ @Override // FsDatasetSpi
+ public List<Long> getCacheReport(String bpid) {
+ return new LinkedList<Long>();
+ }
+
@Override // FSDatasetMBean
public long getCapacity() {
return storage.getCapacity();
@@ -490,6 +496,16 @@ public class SimulatedFSDataset implemen
return storage.getNumFailedVolumes();
}
+ @Override // FSDatasetMBean
+ public long getDnCacheUsed() {
+ return 0l;
+ }
+
+ @Override // FSDatasetMBean
+ public long getDnCacheCapacity() {
+ return 0l;
+ }
+
@Override // FsDatasetSpi
public synchronized long getLength(ExtendedBlock b) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -559,6 +575,18 @@ public class SimulatedFSDataset implemen
}
}
+ @Override // FSDatasetSpi
+ public void cache(String bpid, long[] cacheBlks) {
+ throw new UnsupportedOperationException(
+ "SimulatedFSDataset does not support cache operation!");
+ }
+
+ @Override // FSDatasetSpi
+ public void uncache(String bpid, long[] uncacheBlks) {
+ throw new UnsupportedOperationException(
+ "SimulatedFSDataset does not support uncache operation!");
+ }
+
private BInfo getBInfo(final ExtendedBlock b) {
final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
return map == null? null: map.get(b.getLocalBlock());
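SimulatedFSDataset now has to satisfy the cache-related additions to FsDatasetSpi and FSDatasetMBean: it reports an empty cache (getCacheReport, getDnCacheUsed, getDnCacheCapacity) and rejects cache/uncache commands outright. A hedged test sketch against those stubs (fsd and bpid are assumed to come from the usual test setup; JUnit's static asserts assumed imported):

    static void assertNoCacheSupport(SimulatedFSDataset fsd, String bpid) {
      // The simulated dataset advertises a zero-sized, empty cache...
      assertEquals(0L, fsd.getDnCacheUsed());
      assertEquals(0L, fsd.getDnCacheCapacity());
      assertTrue(fsd.getCacheReport(bpid).isEmpty());
      // ...and refuses actual cache commands.
      try {
        fsd.cache(bpid, new long[] { 1L });
        fail("expected UnsupportedOperationException");
      } catch (UnsupportedOperationException expected) {
      }
    }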
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Tue Oct 29 00:49:20 2013
@@ -126,6 +126,8 @@ public class TestBPOfferService {
.when(mock).sendHeartbeat(
Mockito.any(DatanodeRegistration.class),
Mockito.any(StorageReport[].class),
+ Mockito.anyLong(),
+ Mockito.anyLong(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.anyInt());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Tue Oct 29 00:49:20 2013
@@ -154,6 +154,8 @@ public class TestBlockRecovery {
when(namenode.sendHeartbeat(
Mockito.any(DatanodeRegistration.class),
Mockito.any(StorageReport[].class),
+ Mockito.anyLong(),
+ Mockito.anyLong(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.anyInt()))
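Both mock stubs above change for the same reason: a Mockito stub must supply exactly one matcher per parameter, so once sendHeartbeat() grows two long parameters, each stub needs an additional pair of anyLong() matchers to compile. The full matcher list after this change looks like this (the surrounding when(...) is as in TestBlockRecovery; which long is capacity vs. used is an assumption, and the stubbed return value is assumed from context):

    when(namenode.sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(),   // cache capacity (assumed)
        Mockito.anyLong(),   // cache used (assumed)
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt()))
        .thenReturn(response);  // response: a prepared HeartbeatResponse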
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Tue Oct 29 00:49:20 2013
@@ -845,8 +845,8 @@ public class NNThroughputBenchmark imple
// TODO:FEDERATION currently a single block pool is supported
StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
- DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
- rep, 0, 0, 0).getCommands();
+ DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
+ 0L, 0L, 0, 0, 0).getCommands();
if(cmds != null) {
for (DatanodeCommand cmd : cmds ) {
if(LOG.isDebugEnabled()) {
@@ -893,7 +893,7 @@ public class NNThroughputBenchmark imple
StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
- rep, 0, 0, 0).getCommands();
+ rep, 0L, 0L, 0, 0, 0).getCommands();
if (cmds != null) {
for (DatanodeCommand cmd : cmds) {
if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
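These call sites imply the widened DatanodeProtocol.sendHeartbeat() shape: the two new longs sit between the storage reports and the three existing int counters. Inferred signature (parameter names are guesses; only the types and positions are visible in this diff):

    HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
        StorageReport[] reports,
        long cacheCapacity,   // new in this commit (name assumed)
        long cacheUsed,       // new in this commit (name assumed)
        int xmitsInProgress, int xceiverCount, int failedVolumes)
        throws IOException;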
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Tue Oct 29 00:49:20 2013
@@ -111,7 +111,8 @@ public class NameNodeAdapter {
public static HeartbeatResponse sendHeartBeat(DatanodeRegistration nodeReg,
DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(),
- dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0);
+ dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(),
+ dd.getCacheCapacity(), dd.getCacheRemaining(), 0, 0, 0);
}
public static boolean setReplication(final FSNamesystem ns,
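NameNodeAdapter forwards the descriptor's cache fields straight through, which pins down the widened FSNamesystem.handleHeartbeat() shape: the new longs follow blockPoolUsed and carry cache capacity and cache remaining (the getters used above), ahead of the three unchanged int counters. A sketch of the inferred signature (the int parameter names are guesses):

    HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
        long capacity, long dfsUsed, long remaining, long blockPoolUsed,
        long cacheCapacity, long cacheRemaining,
        int xceiverCount, int xmitsInProgress, int failedVolumes)
        throws IOException;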
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Tue Oct 29 00:49:20 2013
@@ -39,8 +39,11 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -229,6 +232,26 @@ public class OfflineEditsViewerHelper {
// OP_UPDATE_MASTER_KEY 21
// done by getDelegationTokenSecretManager().startThreads();
+ // OP_ADD_CACHE_POOL 35
+ final String pool = "poolparty";
+ dfs.addCachePool(new CachePoolInfo(pool));
+ // OP_MODIFY_CACHE_POOL 36
+ dfs.modifyCachePool(new CachePoolInfo(pool)
+ .setOwnerName("carlton")
+ .setGroupName("party")
+ .setMode(new FsPermission((short)0700))
+ .setWeight(1989));
+ // OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
+ PathBasedCacheDescriptor descriptor =
+ dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
+ setPath(new Path("/bar")).
+ setReplication((short)1).
+ setPool(pool).
+ build());
+ // OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR 34
+ dfs.removePathBasedCacheDescriptor(descriptor);
+ // OP_REMOVE_CACHE_POOL 37
+ dfs.removeCachePool(pool);
// sync to disk, otherwise we parse partial edits
cluster.getNameNode().getFSImage().getEditLog().logSync();
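The five calls above exist purely to get one instance of each new opcode (33-37) into the edit log that editsStored/editsStored.xml are regenerated from. Note the ordering: the descriptor returned by addPathBasedCacheDirective() carries the entry id that the removal logs, which is why OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR records <ID>1</ID> further down in editsStored.xml. A one-line sketch of that linkage (getEntryId() is the accessor used elsewhere in this commit):

    long id = descriptor.getEntryId();  // the <ID> later logged on removal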
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Tue Oct 29 00:49:20 2013
@@ -142,7 +142,8 @@ public class TestDeadDatanode {
// that asks datanode to register again
StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0,
0, 0) };
- DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0).getCommands();
+ DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0)
+ .getCommands();
assertEquals(1, cmd.length);
assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
.getAction());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Tue Oct 29 00:49:20 2013
@@ -413,7 +413,7 @@ public class TestNamenodeRetryCache {
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
- assertEquals(14, cacheSet.size());
+ assertEquals(19, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -432,7 +432,7 @@ public class TestNamenodeRetryCache {
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
- assertEquals(14, cacheSet.size());
+ assertEquals(19, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
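The bump from 14 to 19 expected retry-cache entries lines up with the five cache operations this commit adds to the client protocol (add/modify/remove cache pool, add directive, remove descriptor). A plausible reading, hedged: each of those is marked at-most-once on ClientProtocol, so the test's pass over every such method now creates five more cache entries. For example:

    @AtMostOnce   // assumed annotation on the new ClientProtocol methods
    public PathBasedCacheDescriptor addPathBasedCacheDirective(
        PathBasedCacheDirective directive) throws IOException;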
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Tue Oct 29 00:49:20 2013
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -53,12 +54,15 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -147,7 +151,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn0 = cluster.getNamesystem(0);
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
- assertEquals(14, cacheSet.size());
+ assertEquals(19, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -168,7 +172,7 @@ public class TestRetryCacheWithHA {
FSNamesystem fsn1 = cluster.getNamesystem(1);
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
.getRetryCache().getCacheSet();
- assertEquals(14, cacheSet.size());
+ assertEquals(19, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
@@ -733,6 +737,208 @@ public class TestRetryCacheWithHA {
}
}
+ /** addPathBasedCacheDirective */
+ class AddPathBasedCacheDirectiveOp extends AtMostOnceOp {
+ private String pool;
+ private String path;
+ private PathBasedCacheDescriptor descriptor;
+
+ AddPathBasedCacheDirectiveOp(DFSClient client, String pool, String path) {
+ super("addPathBasedCacheDirective", client);
+ this.pool = pool;
+ this.path = path;
+ }
+
+ @Override
+ void prepare() throws Exception {
+ dfs.addCachePool(new CachePoolInfo(pool));
+ }
+
+ @Override
+ void invoke() throws Exception {
+ descriptor = client.addPathBasedCacheDirective(
+ new PathBasedCacheDirective.Builder().
+ setPath(new Path(path)).
+ setPool(pool).
+ build());
+ }
+
+ @Override
+ boolean checkNamenodeBeforeReturn() throws Exception {
+ for (int i = 0; i < CHECKTIMES; i++) {
+ RemoteIterator<PathBasedCacheDescriptor> iter =
+ dfs.listPathBasedCacheDescriptors(pool, new Path(path));
+ if (iter.hasNext()) {
+ return true;
+ }
+ Thread.sleep(1000);
+ }
+ return false;
+ }
+
+ @Override
+ Object getResult() {
+ return descriptor;
+ }
+ }
+
+ /** removePathBasedCacheDescriptor */
+ class RemovePathBasedCacheDescriptorOp extends AtMostOnceOp {
+ private String pool;
+ private String path;
+ private PathBasedCacheDescriptor descriptor;
+
+ RemovePathBasedCacheDescriptorOp(DFSClient client, String pool,
+ String path) {
+ super("removePathBasedCacheDescriptor", client);
+ this.pool = pool;
+ this.path = path;
+ }
+
+ @Override
+ void prepare() throws Exception {
+ dfs.addCachePool(new CachePoolInfo(pool));
+ descriptor = dfs.addPathBasedCacheDirective(
+ new PathBasedCacheDirective.Builder().
+ setPath(new Path(path)).
+ setPool(pool).
+ build());
+ }
+
+ @Override
+ void invoke() throws Exception {
+ client.removePathBasedCacheDescriptor(descriptor.getEntryId());
+ }
+
+ @Override
+ boolean checkNamenodeBeforeReturn() throws Exception {
+ for (int i = 0; i < CHECKTIMES; i++) {
+ RemoteIterator<PathBasedCacheDescriptor> iter =
+ dfs.listPathBasedCacheDescriptors(pool, new Path(path));
+ if (!iter.hasNext()) {
+ return true;
+ }
+ Thread.sleep(1000);
+ }
+ return false;
+ }
+
+ @Override
+ Object getResult() {
+ return null;
+ }
+ }
+
+ /** addCachePool */
+ class AddCachePoolOp extends AtMostOnceOp {
+ private String pool;
+
+ AddCachePoolOp(DFSClient client, String pool) {
+ super("addCachePool", client);
+ this.pool = pool;
+ }
+
+ @Override
+ void prepare() throws Exception {
+ }
+
+ @Override
+ void invoke() throws Exception {
+ client.addCachePool(new CachePoolInfo(pool));
+ }
+
+ @Override
+ boolean checkNamenodeBeforeReturn() throws Exception {
+ for (int i = 0; i < CHECKTIMES; i++) {
+ RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+ if (iter.hasNext()) {
+ return true;
+ }
+ Thread.sleep(1000);
+ }
+ return false;
+ }
+
+ @Override
+ Object getResult() {
+ return null;
+ }
+ }
+
+ /** modifyCachePool */
+ class ModifyCachePoolOp extends AtMostOnceOp {
+ String pool;
+
+ ModifyCachePoolOp(DFSClient client, String pool) {
+ super("modifyCachePool", client);
+ this.pool = pool;
+ }
+
+ @Override
+ void prepare() throws Exception {
+ client.addCachePool(new CachePoolInfo(pool).setWeight(10));
+ }
+
+ @Override
+ void invoke() throws Exception {
+ client.modifyCachePool(new CachePoolInfo(pool).setWeight(99));
+ }
+
+ @Override
+ boolean checkNamenodeBeforeReturn() throws Exception {
+ for (int i = 0; i < CHECKTIMES; i++) {
+ RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+ if (iter.hasNext() && iter.next().getWeight() == 99) {
+ return true;
+ }
+ Thread.sleep(1000);
+ }
+ return false;
+ }
+
+ @Override
+ Object getResult() {
+ return null;
+ }
+ }
+
+ /** removeCachePool */
+ class RemoveCachePoolOp extends AtMostOnceOp {
+ private String pool;
+
+ RemoveCachePoolOp(DFSClient client, String pool) {
+ super("removeCachePool", client);
+ this.pool = pool;
+ }
+
+ @Override
+ void prepare() throws Exception {
+ client.addCachePool(new CachePoolInfo(pool));
+ }
+
+ @Override
+ void invoke() throws Exception {
+ client.removeCachePool(pool);
+ }
+
+ @Override
+ boolean checkNamenodeBeforeReturn() throws Exception {
+ for (int i = 0; i < CHECKTIMES; i++) {
+ RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+ if (!iter.hasNext()) {
+ return true;
+ }
+ Thread.sleep(1000);
+ }
+ return false;
+ }
+
+ @Override
+ Object getResult() {
+ return null;
+ }
+ }
+
@Test (timeout=60000)
public void testCreateSnapshot() throws Exception {
final DFSClient client = genClientWithDummyHandler();
@@ -810,6 +1016,42 @@ public class TestRetryCacheWithHA {
testClientRetryWithFailover(op);
}
+ @Test (timeout=60000)
+ public void testAddPathBasedCacheDirective() throws Exception {
+ DFSClient client = genClientWithDummyHandler();
+ AtMostOnceOp op = new AddPathBasedCacheDirectiveOp(client, "pool", "/path");
+ testClientRetryWithFailover(op);
+ }
+
+ @Test (timeout=60000)
+ public void testRemovePathBasedCacheDescriptor() throws Exception {
+ DFSClient client = genClientWithDummyHandler();
+ AtMostOnceOp op = new RemovePathBasedCacheDescriptorOp(client, "pool",
+ "/path");
+ testClientRetryWithFailover(op);
+ }
+
+ @Test (timeout=60000)
+ public void testAddCachePool() throws Exception {
+ DFSClient client = genClientWithDummyHandler();
+ AtMostOnceOp op = new AddCachePoolOp(client, "pool");
+ testClientRetryWithFailover(op);
+ }
+
+ @Test (timeout=60000)
+ public void testModifyCachePool() throws Exception {
+ DFSClient client = genClientWithDummyHandler();
+ AtMostOnceOp op = new ModifyCachePoolOp(client, "pool");
+ testClientRetryWithFailover(op);
+ }
+
+ @Test (timeout=60000)
+ public void testRemoveCachePool() throws Exception {
+ DFSClient client = genClientWithDummyHandler();
+ AtMostOnceOp op = new RemoveCachePoolOp(client, "pool");
+ testClientRetryWithFailover(op);
+ }
+
/**
* When NN failover happens, if the client did not receive the response and
* sent a retry request to the other NN, the same response should be received
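The five new AtMostOnceOp subclasses above share one verification idiom: poll the active namenode for the expected state up to CHECKTIMES times, sleeping a second between attempts, and report whether it was ever observed. A self-contained sketch of that loop, factored out (hypothetical; the ops in this commit inline it):

    interface Condition {
      boolean met() throws Exception;
    }

    static boolean waitFor(Condition c, int attempts) throws Exception {
      for (int i = 0; i < attempts; i++) {
        if (c.met()) {
          return true;       // state reached the namenode
        }
        Thread.sleep(1000);  // give the edit a moment to apply
      }
      return false;          // caller treats this as a failed check
    }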
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Tue Oct 29 00:49:20 2013
@@ -79,6 +79,8 @@ public class TestJsonUtil {
response.put("xceiverCount", 4096l);
response.put("networkLocation", "foo.bar.baz");
response.put("adminState", "NORMAL");
+ response.put("cacheCapacity", 123l);
+ response.put("cacheUsed", 321l);
JsonUtil.toDatanodeInfo(response);
}
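TestJsonUtil simply adds the two new keys to the map handed to JsonUtil.toDatanodeInfo(), confirming the parser tolerates (and presumably consumes) them. A hedged round-trip check a test could add (the getter names are assumptions; getCacheCapacity() does appear elsewhere in this commit):

    DatanodeInfo info = JsonUtil.toDatanodeInfo(response);
    assertEquals(123L, info.getCacheCapacity());  // assumed getter
    assertEquals(321L, info.getCacheUsed());      // assumed getter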
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Tue Oct 29 00:49:20 2013
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<EDITS>
- <EDITS_VERSION>-47</EDITS_VERSION>
+ <EDITS_VERSION>-48</EDITS_VERSION>
<RECORD>
<OPCODE>OP_START_LOG_SEGMENT</OPCODE>
<DATA>
@@ -13,8 +13,8 @@
<TXID>2</TXID>
<DELEGATION_KEY>
<KEY_ID>1</KEY_ID>
- <EXPIRY_DATE>1375509063810</EXPIRY_DATE>
- <KEY>4d47710649039b98</KEY>
+ <EXPIRY_DATE>1381946377599</EXPIRY_DATE>
+ <KEY>4f37c0db7342fb35</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@@ -24,8 +24,8 @@
<TXID>3</TXID>
<DELEGATION_KEY>
<KEY_ID>2</KEY_ID>
- <EXPIRY_DATE>1375509063812</EXPIRY_DATE>
- <KEY>38cbb1d8fd90fcb2</KEY>
+ <EXPIRY_DATE>1381946377609</EXPIRY_DATE>
+ <KEY>471d4ddd00402ba6</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@@ -37,17 +37,17 @@
<INODEID>16386</INODEID>
<PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864805</MTIME>
- <ATIME>1374817864805</ATIME>
+ <MTIME>1381255179312</MTIME>
+ <ATIME>1381255179312</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-134124999_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>8</RPC_CALLID>
</DATA>
</RECORD>
@@ -59,13 +59,13 @@
<INODEID>0</INODEID>
<PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864816</MTIME>
- <ATIME>1374817864805</ATIME>
+ <MTIME>1381255179355</MTIME>
+ <ATIME>1381255179312</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -78,8 +78,8 @@
<LENGTH>0</LENGTH>
<SRC>/file_create_u\0001;F431</SRC>
<DST>/file_moved</DST>
- <TIMESTAMP>1374817864818</TIMESTAMP>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <TIMESTAMP>1381255179373</TIMESTAMP>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>10</RPC_CALLID>
</DATA>
</RECORD>
@@ -89,8 +89,8 @@
<TXID>7</TXID>
<LENGTH>0</LENGTH>
<PATH>/file_moved</PATH>
- <TIMESTAMP>1374817864822</TIMESTAMP>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <TIMESTAMP>1381255179397</TIMESTAMP>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>11</RPC_CALLID>
</DATA>
</RECORD>
@@ -101,9 +101,9 @@
<LENGTH>0</LENGTH>
<INODEID>16387</INODEID>
<PATH>/directory_mkdir</PATH>
- <TIMESTAMP>1374817864825</TIMESTAMP>
+ <TIMESTAMP>1381255179424</TIMESTAMP>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>493</MODE>
</PERMISSION_STATUS>
@@ -136,7 +136,7 @@
<TXID>12</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>16</RPC_CALLID>
</DATA>
</RECORD>
@@ -147,7 +147,7 @@
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
<SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>17</RPC_CALLID>
</DATA>
</RECORD>
@@ -157,7 +157,7 @@
<TXID>14</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>18</RPC_CALLID>
</DATA>
</RECORD>
@@ -169,17 +169,17 @@
<INODEID>16388</INODEID>
<PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864846</MTIME>
- <ATIME>1374817864846</ATIME>
+ <MTIME>1381255179522</MTIME>
+ <ATIME>1381255179522</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-134124999_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>19</RPC_CALLID>
</DATA>
</RECORD>
@@ -191,13 +191,13 @@
<INODEID>0</INODEID>
<PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864848</MTIME>
- <ATIME>1374817864846</ATIME>
+ <MTIME>1381255179531</MTIME>
+ <ATIME>1381255179522</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -253,9 +253,9 @@
<LENGTH>0</LENGTH>
<SRC>/file_create_u\0001;F431</SRC>
<DST>/file_moved</DST>
- <TIMESTAMP>1374817864860</TIMESTAMP>
+ <TIMESTAMP>1381255179602</TIMESTAMP>
<OPTIONS>NONE</OPTIONS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>26</RPC_CALLID>
</DATA>
</RECORD>
@@ -267,17 +267,17 @@
<INODEID>16389</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864864</MTIME>
- <ATIME>1374817864864</ATIME>
+ <MTIME>1381255179619</MTIME>
+ <ATIME>1381255179619</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-134124999_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>28</RPC_CALLID>
</DATA>
</RECORD>
@@ -388,8 +388,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864927</MTIME>
- <ATIME>1374817864864</ATIME>
+ <MTIME>1381255179862</MTIME>
+ <ATIME>1381255179619</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -409,7 +409,7 @@
<GENSTAMP>1003</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -423,17 +423,17 @@
<INODEID>16390</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864929</MTIME>
- <ATIME>1374817864929</ATIME>
+ <MTIME>1381255179876</MTIME>
+ <ATIME>1381255179876</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-134124999_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>41</RPC_CALLID>
</DATA>
</RECORD>
@@ -544,8 +544,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864947</MTIME>
- <ATIME>1374817864929</ATIME>
+ <MTIME>1381255179957</MTIME>
+ <ATIME>1381255179876</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -565,7 +565,7 @@
<GENSTAMP>1006</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -579,17 +579,17 @@
<INODEID>16391</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864950</MTIME>
- <ATIME>1374817864950</ATIME>
+ <MTIME>1381255179967</MTIME>
+ <ATIME>1381255179967</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-134124999_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>53</RPC_CALLID>
</DATA>
</RECORD>
@@ -700,8 +700,8 @@
<INODEID>0</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817864966</MTIME>
- <ATIME>1374817864950</ATIME>
+ <MTIME>1381255180085</MTIME>
+ <ATIME>1381255179967</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
@@ -721,7 +721,7 @@
<GENSTAMP>1009</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -733,12 +733,12 @@
<TXID>56</TXID>
<LENGTH>0</LENGTH>
<TRG>/file_concat_target</TRG>
- <TIMESTAMP>1374817864967</TIMESTAMP>
+ <TIMESTAMP>1381255180099</TIMESTAMP>
<SOURCES>
<SOURCE1>/file_concat_0</SOURCE1>
<SOURCE2>/file_concat_1</SOURCE2>
</SOURCES>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>64</RPC_CALLID>
</DATA>
</RECORD>
@@ -750,14 +750,14 @@
<INODEID>16392</INODEID>
<PATH>/file_symlink</PATH>
<VALUE>/file_concat_target</VALUE>
- <MTIME>1374817864971</MTIME>
- <ATIME>1374817864971</ATIME>
+ <MTIME>1381255180116</MTIME>
+ <ATIME>1381255180116</ATIME>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>511</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
<RPC_CALLID>65</RPC_CALLID>
</DATA>
</RECORD>
@@ -768,14 +768,14 @@
<DELEGATION_TOKEN_IDENTIFIER>
<KIND>HDFS_DELEGATION_TOKEN</KIND>
<SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
- <OWNER>jing</OWNER>
+ <OWNER>andrew</OWNER>
<RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER>
- <ISSUE_DATE>1374817864974</ISSUE_DATE>
- <MAX_DATE>1375422664974</MAX_DATE>
+ <ISSUE_DATE>1381255180128</ISSUE_DATE>
+ <MAX_DATE>1381859980128</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER>
- <EXPIRY_TIME>1374904264974</EXPIRY_TIME>
+ <EXPIRY_TIME>1381341580128</EXPIRY_TIME>
</DATA>
</RECORD>
<RECORD>
@@ -785,14 +785,14 @@
<DELEGATION_TOKEN_IDENTIFIER>
<KIND>HDFS_DELEGATION_TOKEN</KIND>
<SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
- <OWNER>jing</OWNER>
+ <OWNER>andrew</OWNER>
<RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER>
- <ISSUE_DATE>1374817864974</ISSUE_DATE>
- <MAX_DATE>1375422664974</MAX_DATE>
+ <ISSUE_DATE>1381255180128</ISSUE_DATE>
+ <MAX_DATE>1381859980128</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER>
- <EXPIRY_TIME>1374904265012</EXPIRY_TIME>
+ <EXPIRY_TIME>1381341580177</EXPIRY_TIME>
</DATA>
</RECORD>
<RECORD>
@@ -802,55 +802,112 @@
<DELEGATION_TOKEN_IDENTIFIER>
<KIND>HDFS_DELEGATION_TOKEN</KIND>
<SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
- <OWNER>jing</OWNER>
+ <OWNER>andrew</OWNER>
<RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER>
- <ISSUE_DATE>1374817864974</ISSUE_DATE>
- <MAX_DATE>1375422664974</MAX_DATE>
+ <ISSUE_DATE>1381255180128</ISSUE_DATE>
+ <MAX_DATE>1381859980128</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER>
</DATA>
</RECORD>
<RECORD>
- <OPCODE>OP_ADD</OPCODE>
+ <OPCODE>OP_ADD_CACHE_POOL</OPCODE>
<DATA>
<TXID>61</TXID>
+ <POOLNAME>poolparty</POOLNAME>
+ <PERMISSION_STATUS>
+ <USERNAME>andrew</USERNAME>
+ <GROUPNAME>andrew</GROUPNAME>
+ <MODE>493</MODE>
+ </PERMISSION_STATUS>
+ <WEIGHT>100</WEIGHT>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
+ <RPC_CALLID>75</RPC_CALLID>
+ </DATA>
+ </RECORD>
+ <RECORD>
+ <OPCODE>OP_MODIFY_CACHE_POOL</OPCODE>
+ <DATA>
+ <TXID>62</TXID>
+ <POOLNAME>poolparty</POOLNAME>
+ <OWNERNAME>carlton</OWNERNAME>
+ <GROUPNAME>party</GROUPNAME>
+ <MODE>448</MODE>
+ <WEIGHT>1989</WEIGHT>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
+ <RPC_CALLID>76</RPC_CALLID>
+ </DATA>
+ </RECORD>
+ <RECORD>
+ <OPCODE>OP_ADD_PATH_BASED_CACHE_DIRECTIVE</OPCODE>
+ <DATA>
+ <TXID>63</TXID>
+ <PATH>/bar</PATH>
+ <REPLICATION>1</REPLICATION>
+ <POOL>poolparty</POOL>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
+ <RPC_CALLID>77</RPC_CALLID>
+ </DATA>
+ </RECORD>
+ <RECORD>
+ <OPCODE>OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR</OPCODE>
+ <DATA>
+ <TXID>64</TXID>
+ <ID>1</ID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
+ <RPC_CALLID>78</RPC_CALLID>
+ </DATA>
+ </RECORD>
+ <RECORD>
+ <OPCODE>OP_REMOVE_CACHE_POOL</OPCODE>
+ <DATA>
+ <TXID>65</TXID>
+ <POOLNAME>poolparty</POOLNAME>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
+ <RPC_CALLID>79</RPC_CALLID>
+ </DATA>
+ </RECORD>
+ <RECORD>
+ <OPCODE>OP_ADD</OPCODE>
+ <DATA>
+ <TXID>66</TXID>
<LENGTH>0</LENGTH>
<INODEID>16393</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817865017</MTIME>
- <ATIME>1374817865017</ATIME>
+ <MTIME>1381255180288</MTIME>
+ <ATIME>1381255180288</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_-134124999_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
- <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
- <RPC_CALLID>69</RPC_CALLID>
+ <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID>
+ <RPC_CALLID>74</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
<DATA>
- <TXID>62</TXID>
+ <TXID>67</TXID>
<BLOCK_ID>1073741834</BLOCK_ID>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
<DATA>
- <TXID>63</TXID>
+ <TXID>68</TXID>
<GENSTAMPV2>1010</GENSTAMPV2>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>64</TXID>
+ <TXID>69</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
@@ -864,7 +921,7 @@
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>65</TXID>
+ <TXID>70</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
@@ -878,15 +935,31 @@
<RECORD>
<OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
<DATA>
- <TXID>66</TXID>
+ <TXID>71</TXID>
<GENSTAMPV2>1011</GENSTAMPV2>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REASSIGN_LEASE</OPCODE>
<DATA>
- <TXID>67</TXID>
- <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1676409172_1</LEASEHOLDER>
+ <TXID>72</TXID>
+ <LEASEHOLDER>DFSClient_NONMAPREDUCE_-134124999_1</LEASEHOLDER>
+ <PATH>/hard-lease-recovery-test</PATH>
+ <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
+ </DATA>
+ </RECORD>
+ <RECORD>
+ <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
+ <DATA>
+ <TXID>73</TXID>
+ <GENSTAMPV2>1012</GENSTAMPV2>
+ </DATA>
+ </RECORD>
+ <RECORD>
+ <OPCODE>OP_REASSIGN_LEASE</OPCODE>
+ <DATA>
+ <TXID>74</TXID>
+ <LEASEHOLDER>HDFS_NameNode</LEASEHOLDER>
<PATH>/hard-lease-recovery-test</PATH>
<NEWHOLDER>HDFS_NameNode</NEWHOLDER>
</DATA>
@@ -894,23 +967,23 @@
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
- <TXID>68</TXID>
+ <TXID>75</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1374817867688</MTIME>
- <ATIME>1374817865017</ATIME>
+ <MTIME>1381255185142</MTIME>
+ <ATIME>1381255180288</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<BLOCK>
<BLOCK_ID>1073741834</BLOCK_ID>
<NUM_BYTES>11</NUM_BYTES>
- <GENSTAMP>1011</GENSTAMP>
+ <GENSTAMP>1012</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -919,7 +992,7 @@
<RECORD>
<OPCODE>OP_END_LOG_SEGMENT</OPCODE>
<DATA>
- <TXID>69</TXID>
+ <TXID>76</TXID>
</DATA>
</RECORD>
</EDITS>
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1536572&r1=1536571&r2=1536572&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Tue Oct 29 00:49:20 2013
@@ -16524,7 +16524,7 @@
</comparators>
</test>
- <test> <!--Tested -->
+ <test> <!--Tested -->
<description>Verifying clrSpaceQuota operation is not permitted in safemode</description>
<test-commands>
<command>-fs NAMENODE -mkdir /test </command>