You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/15 15:48:57 UTC
svn commit: r1398288 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/test/java/org/apache/hadoop/hdfs/server/namenode/
Author: szetszwo
Date: Mon Oct 15 13:48:56 2012
New Revision: 1398288
URL: http://svn.apache.org/viewvc?rev=1398288&view=rev
Log:
HDFS-4037. Rename the getReplication() method in BlockCollection to getBlockReplication().
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Oct 15 13:48:56 2012
@@ -350,8 +350,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-3939. NN RPC address cleanup. (eli)
- HDFS-3373. Change DFSClient input stream socket cache to global static and add
- a thread to cleanup expired cache entries. (John George via szetszwo)
+ HDFS-3373. Change DFSClient input stream socket cache to global static and
+ add a thread to cleanup expired cache entries. (John George via szetszwo)
HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
@@ -382,6 +382,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-8911. CRLF characters in source and text files.
(Raja Aluri via suresh)
+ HDFS-4037. Rename the getReplication() method in BlockCollection to
+ getBlockReplication(). (szetszwo)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java Mon Oct 15 13:48:56 2012
@@ -19,12 +19,14 @@ package org.apache.hadoop.hdfs.server.bl
import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ContentSummary;
/**
* This interface is used by the block manager to expose a
* few characteristics of a collection of Block/BlockUnderConstruction.
*/
+@InterfaceAudience.Private
public interface BlockCollection {
/**
* Get the last block of the collection.
@@ -56,7 +58,7 @@ public interface BlockCollection {
* Get block replication for the collection
* @return block replication value
*/
- public short getReplication();
+ public short getBlockReplication();
/**
* Get the name of the collection.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Mon Oct 15 13:48:56 2012
@@ -73,7 +73,7 @@ public class BlockInfo extends Block imp
* @param from BlockInfo to copy from.
*/
protected BlockInfo(BlockInfo from) {
- this(from, from.bc.getReplication());
+ this(from, from.bc.getBlockReplication());
this.bc = from.bc;
}
@@ -335,7 +335,7 @@ public class BlockInfo extends Block imp
BlockUCState s, DatanodeDescriptor[] targets) {
if(isComplete()) {
return new BlockInfoUnderConstruction(
- this, getBlockCollection().getReplication(), s, targets);
+ this, getBlockCollection().getBlockReplication(), s, targets);
}
// the block is already under construction
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Mon Oct 15 13:48:56 2012
@@ -997,7 +997,7 @@ public class BlockManager {
// Add this replica to corruptReplicas Map
corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason);
- if (countNodes(b.stored).liveReplicas() >= bc.getReplication()) {
+ if (countNodes(b.stored).liveReplicas() >= bc.getBlockReplication()) {
// the block is over-replicated so invalidate the replicas immediately
invalidateBlock(b, node);
} else if (namesystem.isPopulatingReplQueues()) {
@@ -1135,7 +1135,7 @@ public class BlockManager {
continue;
}
- requiredReplication = bc.getReplication();
+ requiredReplication = bc.getBlockReplication();
// get a source data-node
containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1221,7 +1221,7 @@ public class BlockManager {
neededReplications.decrementReplicationIndex(priority);
continue;
}
- requiredReplication = bc.getReplication();
+ requiredReplication = bc.getBlockReplication();
// do not schedule more if enough replicas is already pending
NumberReplicas numReplicas = countNodes(block);
@@ -2089,7 +2089,7 @@ assert storedBlock.findDatanode(dn) < 0
}
// handle underReplication/overReplication
- short fileReplication = bc.getReplication();
+ short fileReplication = bc.getBlockReplication();
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedReplicas(), fileReplication);
@@ -2228,7 +2228,7 @@ assert storedBlock.findDatanode(dn) < 0
return MisReplicationResult.UNDER_CONSTRUCTION;
}
// calculate current replication
- short expectedReplication = bc.getReplication();
+ short expectedReplication = bc.getBlockReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be
@@ -2699,7 +2699,7 @@ assert storedBlock.findDatanode(dn) < 0
while(it.hasNext()) {
final Block block = it.next();
BlockCollection bc = blocksMap.getBlockCollection(block);
- short expectedReplication = bc.getReplication();
+ short expectedReplication = bc.getBlockReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
if (numCurrentReplica > expectedReplication) {
@@ -2845,7 +2845,7 @@ assert storedBlock.findDatanode(dn) < 0
if (bc == null) { // block does not belong to any file
return 0;
}
- return bc.getReplication();
+ return bc.getBlockReplication();
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Oct 15 13:48:56 2012
@@ -345,13 +345,13 @@ public class FSDirectory implements Clos
// check quota limits and updated space consumed
updateCount(inodes, inodes.length-1, 0,
- fileINode.getPreferredBlockSize()*fileINode.getReplication(), true);
+ fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
// associate new last block for the file
BlockInfoUnderConstruction blockInfo =
new BlockInfoUnderConstruction(
block,
- fileINode.getReplication(),
+ fileINode.getBlockReplication(),
BlockUCState.UNDER_CONSTRUCTION,
targets);
getBlockManager().addBlockCollection(blockInfo, fileINode);
@@ -442,7 +442,7 @@ public class FSDirectory implements Clos
// update space consumed
INode[] pathINodes = getExistingPathINodes(path);
updateCount(pathINodes, pathINodes.length-1, 0,
- -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+ -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
}
/**
@@ -821,7 +821,7 @@ public class FSDirectory implements Clos
return null;
}
INodeFile fileNode = (INodeFile)inode;
- final short oldRepl = fileNode.getReplication();
+ final short oldRepl = fileNode.getBlockReplication();
// check disk quota
long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
@@ -2061,7 +2061,7 @@ public class FSDirectory implements Clos
if (node instanceof INodeFile) {
INodeFile fileNode = (INodeFile)node;
size = fileNode.computeFileSize(true);
- replication = fileNode.getReplication();
+ replication = fileNode.getBlockReplication();
blocksize = fileNode.getPreferredBlockSize();
}
return new HdfsFileStatus(
@@ -2091,7 +2091,7 @@ public class FSDirectory implements Clos
if (node instanceof INodeFile) {
INodeFile fileNode = (INodeFile)node;
size = fileNode.computeFileSize(true);
- replication = fileNode.getReplication();
+ replication = fileNode.getBlockReplication();
blocksize = fileNode.getPreferredBlockSize();
loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
fileNode.getBlocks(), fileNode.computeFileSize(false),
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Oct 15 13:48:56 2012
@@ -657,7 +657,7 @@ public class FSEditLog implements LogsPu
public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
AddOp op = AddOp.getInstance(cache.get())
.setPath(path)
- .setReplication(newNode.getReplication())
+ .setReplication(newNode.getBlockReplication())
.setModificationTime(newNode.getModificationTime())
.setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize())
@@ -675,7 +675,7 @@ public class FSEditLog implements LogsPu
public void logCloseFile(String path, INodeFile newNode) {
CloseOp op = CloseOp.getInstance(cache.get())
.setPath(path)
- .setReplication(newNode.getReplication())
+ .setReplication(newNode.getBlockReplication())
.setModificationTime(newNode.getModificationTime())
.setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize())
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Mon Oct 15 13:48:56 2012
@@ -592,13 +592,13 @@ public class FSEditLogLoader {
// what about an old-version fsync() where fsync isn't called
// until several blocks in?
newBI = new BlockInfoUnderConstruction(
- newBlock, file.getReplication());
+ newBlock, file.getBlockReplication());
} else {
// OP_CLOSE should add finalized blocks. This code path
// is only executed when loading edits written by prior
// versions of Hadoop. Current versions always log
// OP_ADD operations as each block is allocated.
- newBI = new BlockInfo(newBlock, file.getReplication());
+ newBI = new BlockInfo(newBlock, file.getBlockReplication());
}
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Mon Oct 15 13:48:56 2012
@@ -126,7 +126,7 @@ public class FSImageSerialization {
String path)
throws IOException {
writeString(path, out);
- out.writeShort(cons.getReplication());
+ out.writeShort(cons.getBlockReplication());
out.writeLong(cons.getModificationTime());
out.writeLong(cons.getPreferredBlockSize());
int nrBlocks = cons.getBlocks().length;
@@ -175,7 +175,7 @@ public class FSImageSerialization {
filePerm);
} else {
INodeFile fileINode = (INodeFile)node;
- out.writeShort(fileINode.getReplication());
+ out.writeShort(fileINode.getBlockReplication());
out.writeLong(fileINode.getModificationTime());
out.writeLong(fileINode.getAccessTime());
out.writeLong(fileINode.getPreferredBlockSize());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Oct 15 13:48:56 2012
@@ -1411,7 +1411,7 @@ public class FSNamesystem implements Nam
}
si.add(trgInode);
- short repl = trgInode.getReplication();
+ short repl = trgInode.getBlockReplication();
// now check the srcs
boolean endSrc = false; // final src file doesn't have to have full end block
@@ -1431,10 +1431,10 @@ public class FSNamesystem implements Nam
}
// check replication and blocks size
- if(repl != srcInode.getReplication()) {
+ if(repl != srcInode.getBlockReplication()) {
throw new IllegalArgumentException(src + " and " + target + " " +
"should have same replication: "
- + repl + " vs. " + srcInode.getReplication());
+ + repl + " vs. " + srcInode.getBlockReplication());
}
//boolean endBlock=false;
@@ -1877,7 +1877,7 @@ public class FSNamesystem implements Nam
boolean writeToEditLog) throws IOException {
INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
file.getLocalNameBytes(),
- file.getReplication(),
+ file.getBlockReplication(),
file.getModificationTime(),
file.getPreferredBlockSize(),
file.getBlocks(),
@@ -2191,7 +2191,7 @@ public class FSNamesystem implements Nam
fileLength = pendingFile.computeContentSummary().getLength();
blockSize = pendingFile.getPreferredBlockSize();
clientNode = pendingFile.getClientNode();
- replication = pendingFile.getReplication();
+ replication = pendingFile.getBlockReplication();
} finally {
writeUnlock();
}
@@ -2435,7 +2435,7 @@ public class FSNamesystem implements Nam
* them into invalidateBlocks.
*/
private void checkReplicationFactor(INodeFile file) {
- short numExpectedReplicas = file.getReplication();
+ short numExpectedReplicas = file.getBlockReplication();
Block[] pendingBlocks = file.getBlocks();
int nrBlocks = pendingBlocks.length;
for (int i = 0; i < nrBlocks; i++) {
@@ -3154,7 +3154,7 @@ public class FSNamesystem implements Nam
if (diff > 0) {
try {
String path = leaseManager.findPath(fileINode);
- dir.updateSpaceConsumed(path, 0, -diff * fileINode.getReplication());
+ dir.updateSpaceConsumed(path, 0, -diff * fileINode.getBlockReplication());
} catch (IOException e) {
LOG.warn("Unexpected exception while updating disk space.", e);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Mon Oct 15 13:48:56 2012
@@ -71,7 +71,7 @@ public class INodeFile extends INode imp
/** @return the replication factor of the file. */
@Override
- public short getReplication() {
+ public short getBlockReplication() {
return (short) ((header & HEADERMASK) >> BLOCKBITS);
}
@@ -215,7 +215,7 @@ public class INodeFile extends INode imp
isUnderConstruction()) {
size += getPreferredBlockSize() - blkArr[blkArr.length-1].getNumBytes();
}
- return size * getReplication();
+ return size * getBlockReplication();
}
/**
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Mon Oct 15 13:48:56 2012
@@ -104,7 +104,7 @@ class INodeFileUnderConstruction extends
"non-complete blocks! Blocks are: " + blocksAsString();
INodeFile obj = new INodeFile(getPermissionStatus(),
getBlocks(),
- getReplication(),
+ getBlockReplication(),
getModificationTime(),
getModificationTime(),
getPreferredBlockSize());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Mon Oct 15 13:48:56 2012
@@ -834,7 +834,7 @@ class NamenodeJspHelper {
doc.endTag();
doc.startTag("replication");
- doc.pcdata(""+inode.getReplication());
+ doc.pcdata(""+inode.getBlockReplication());
doc.endTag();
doc.startTag("disk_space_consumed");
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Mon Oct 15 13:48:56 2012
@@ -379,7 +379,7 @@ public class TestBlockManager {
private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short)3).when(bc).getReplication();
+ Mockito.doReturn((short)3).when(bc).getBlockReplication();
BlockInfo blockInfo = blockOnNodes(blockId, nodes);
bm.blocksMap.addBlockCollection(blockInfo, bc);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1398288&r1=1398287&r2=1398288&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Mon Oct 15 13:48:56 2012
@@ -48,7 +48,7 @@ public class TestINodeFile {
FsPermission.getDefault()), null, replication,
0L, 0L, preferredBlockSize);
assertEquals("True has to be returned in this case", replication,
- inf.getReplication());
+ inf.getBlockReplication());
}
/**