Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/02/10 02:49:24 UTC
svn commit: r1242635 [3/5] - in
/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: hadoop-hdfs-httpfs/
hadoop-hdfs/ hadoop-hdfs/src/main/bin/
hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/
hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main...
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Feb 10 01:49:08 2012
@@ -23,7 +23,6 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
-import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
@@ -75,20 +74,19 @@ import org.apache.hadoop.util.Reflection
*
***************************************************/
@InterfaceAudience.Private
-public class FSDataset implements FSDatasetInterface {
+class FSDataset implements FSDatasetInterface {
/**
* A node type that can be built into a tree reflecting the
* hierarchy of blocks on the local disk.
*/
- class FSDir {
- File dir;
+ private class FSDir {
+ final File dir;
int numBlocks = 0;
FSDir children[];
int lastChildIdx = 0;
- /**
- */
- public FSDir(File dir)
+
+ private FSDir(File dir)
throws IOException {
this.dir = dir;
this.children = null;
@@ -113,7 +111,7 @@ public class FSDataset implements FSData
}
}
- public File addBlock(Block b, File src) throws IOException {
+ private File addBlock(Block b, File src) throws IOException {
//First try without creating subdirectories
File file = addBlock(b, src, false, false);
return (file != null) ? file : addBlock(b, src, true, true);
@@ -161,7 +159,7 @@ public class FSDataset implements FSData
return children[ lastChildIdx ].addBlock(b, src, true, false);
}
- void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
+ private void getVolumeMap(String bpid, ReplicasMap volumeMap, FSVolume volume)
throws IOException {
if (children != null) {
for (int i = 0; i < children.length; i++) {
@@ -207,7 +205,7 @@ public class FSDataset implements FSData
* check if a data directory is healthy
* @throws DiskErrorException
*/
- public void checkDirTree() throws DiskErrorException {
+ private void checkDirTree() throws DiskErrorException {
DiskChecker.checkDir(dir);
if (children != null) {
@@ -217,7 +215,7 @@ public class FSDataset implements FSData
}
}
- void clearPath(File f) {
+ private void clearPath(File f) {
String root = dir.getAbsolutePath();
String dir = f.getAbsolutePath();
if (dir.startsWith(root)) {
@@ -270,7 +268,8 @@ public class FSDataset implements FSData
}
return false;
}
-
+
+ @Override
public String toString() {
return "FSDir{" +
"dir=" + dir +
@@ -284,7 +283,7 @@ public class FSDataset implements FSData
* Taken together, all BlockPoolSlices sharing a block pool ID across a
* cluster represent a single block pool.
*/
- class BlockPoolSlice {
+ private class BlockPoolSlice {
private final String bpid;
private final FSVolume volume; // volume to which this BlockPool belongs
private final File currentDir; // StorageDirectory/current/bpid/current
@@ -342,11 +341,7 @@ public class FSDataset implements FSData
File getDirectory() {
return currentDir.getParentFile();
}
-
- File getCurrentDir() {
- return currentDir;
- }
-
+
File getFinalizedDir() {
return finalizedDir.dir;
}
@@ -373,7 +368,7 @@ public class FSDataset implements FSData
*/
File createTmpFile(Block b) throws IOException {
File f = new File(tmpDir, b.getBlockName());
- return FSDataset.createTmpFile(b, f);
+ return DatanodeUtil.createTmpFile(b, f);
}
/**
@@ -382,12 +377,12 @@ public class FSDataset implements FSData
*/
File createRbwFile(Block b) throws IOException {
File f = new File(rbwDir, b.getBlockName());
- return FSDataset.createTmpFile(b, f);
+ return DatanodeUtil.createTmpFile(b, f);
}
File addBlock(Block b, File f) throws IOException {
File blockFile = finalizedDir.addBlock(b, f);
- File metaFile = getMetaFile(blockFile , b.getGenerationStamp());
+ File metaFile = DatanodeUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
return blockFile;
}
@@ -455,7 +450,7 @@ public class FSDataset implements FSData
DataInputStream checksumIn = null;
InputStream blockIn = null;
try {
- File metaFile = new File(getMetaFileName(blockFile.toString(), genStamp));
+ final File metaFile = DatanodeUtil.getMetaFile(blockFile, genStamp);
long blockFileLen = blockFile.length();
long metaFileLen = metaFile.length();
int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
@@ -470,7 +465,7 @@ public class FSDataset implements FSData
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
short version = header.getVersion();
- if (version != FSDataset.METADATA_VERSION) {
+ if (version != BlockMetadataHeader.VERSION) {
DataNode.LOG.warn("Wrong version (" + version + ") for metadata file "
+ metaFile + " ignoring ...");
}
@@ -521,7 +516,7 @@ public class FSDataset implements FSData
}
}
- class FSVolume {
+ class FSVolume implements FSVolumeInterface {
private final Map<String, BlockPoolSlice> map = new HashMap<String, BlockPoolSlice>();
private final File currentDir; // <StorageDirectory>/current
private final DF usage;
@@ -534,17 +529,12 @@ public class FSDataset implements FSData
File parent = currentDir.getParentFile();
this.usage = new DF(parent, conf);
}
-
- /** Return storage directory corresponding to the volume */
- public File getDir() {
- return currentDir.getParentFile();
- }
- public File getCurrentDir() {
+ File getCurrentDir() {
return currentDir;
}
- public File getRbwDir(String bpid) throws IOException {
+ File getRbwDir(String bpid) throws IOException {
BlockPoolSlice bp = getBlockPoolSlice(bpid);
return bp.getRbwDir();
}
@@ -583,8 +573,9 @@ public class FSDataset implements FSData
long remaining = usage.getCapacity() - reserved;
return remaining > 0 ? remaining : 0;
}
-
- long getAvailable() throws IOException {
+
+ @Override
+ public long getAvailable() throws IOException {
long remaining = getCapacity()-getDfsUsed();
long available = usage.getAvailable();
if (remaining>available) {
@@ -600,19 +591,30 @@ public class FSDataset implements FSData
String getMount() throws IOException {
return usage.getMount();
}
-
- BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
+
+ private BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
BlockPoolSlice bp = map.get(bpid);
if (bp == null) {
throw new IOException("block pool " + bpid + " is not found");
}
return bp;
}
-
+
+ @Override
+ public File getDirectory(String bpid) throws IOException {
+ return getBlockPoolSlice(bpid).getDirectory();
+ }
+
+ @Override
+ public File getFinalizedDir(String bpid) throws IOException {
+ return getBlockPoolSlice(bpid).getFinalizedDir();
+ }
+
/**
* Make a deep copy of the list of currently active BPIDs
*/
- String[] getBlockPoolList() {
+ @Override
+ public String[] getBlockPoolList() {
synchronized(FSDataset.this) {
return map.keySet().toArray(new String[map.keySet().size()]);
}
@@ -681,7 +683,8 @@ public class FSDataset implements FSData
BlockPoolSlice bp = getBlockPoolSlice(bpid);
bp.clearPath(f);
}
-
+
+ @Override
public String toString() {
return currentDir.getAbsolutePath();
}
@@ -773,21 +776,18 @@ public class FSDataset implements FSData
* Read access to this unmodifiable list is not synchronized.
* This list is replaced on modification holding "this" lock.
*/
- private volatile List<FSVolume> volumes = null;
+ private volatile List<FSVolumeInterface> volumes = null;
+
BlockVolumeChoosingPolicy blockChooser;
int numFailedVolumes;
- FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) {
- List<FSVolume> list = Arrays.asList(volumes);
- this.volumes = Collections.unmodifiableList(list);
+ FSVolumeSet(List<FSVolumeInterface> volumes, int failedVols,
+ BlockVolumeChoosingPolicy blockChooser) {
+ this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
- private int numberOfVolumes() {
- return volumes.size();
- }
-
private int numberOfFailedVolumes() {
return numFailedVolumes;
}
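
[Editor's note: the volumes field above is the copy-on-write idiom in miniature. Readers traverse the volatile, unmodifiable list without locking, while writers build a replacement list under the FSDataset lock and publish it in a single assignment, as checkDirs() does inline further down. A minimal sketch of the update side, using a hypothetical replaceVolumes helper that is not part of this commit:]

    // Sketch only: publish a new immutable snapshot through the volatile
    // field so unsynchronized readers never observe a half-built list.
    private synchronized void replaceVolumes(List<FSVolumeInterface> newVols) {
      volumes = Collections.unmodifiableList(
          new ArrayList<FSVolumeInterface>(newVols));
    }
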
@@ -800,36 +800,36 @@ public class FSDataset implements FSData
* @return next volume to store the block in.
*/
synchronized FSVolume getNextVolume(long blockSize) throws IOException {
- return blockChooser.chooseVolume(volumes, blockSize);
+ return (FSVolume)blockChooser.chooseVolume(volumes, blockSize);
}
private long getDfsUsed() throws IOException {
long dfsUsed = 0L;
- for (FSVolume vol : volumes) {
- dfsUsed += vol.getDfsUsed();
+ for (FSVolumeInterface v : volumes) {
+ dfsUsed += ((FSVolume)v).getDfsUsed();
}
return dfsUsed;
}
private long getBlockPoolUsed(String bpid) throws IOException {
long dfsUsed = 0L;
- for (FSVolume vol : volumes) {
- dfsUsed += vol.getBlockPoolUsed(bpid);
+ for (FSVolumeInterface v : volumes) {
+ dfsUsed += ((FSVolume)v).getBlockPoolUsed(bpid);
}
return dfsUsed;
}
private long getCapacity() {
long capacity = 0L;
- for (FSVolume vol : volumes) {
- capacity += vol.getCapacity();
+ for (FSVolumeInterface v : volumes) {
+ capacity += ((FSVolume)v).getCapacity();
}
return capacity;
}
private long getRemaining() throws IOException {
long remaining = 0L;
- for (FSVolume vol : volumes) {
+ for (FSVolumeInterface vol : volumes) {
remaining += vol.getAvailable();
}
return remaining;
@@ -837,15 +837,15 @@ public class FSDataset implements FSData
private void getVolumeMap(ReplicasMap volumeMap)
throws IOException {
- for (FSVolume vol : volumes) {
- vol.getVolumeMap(volumeMap);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).getVolumeMap(volumeMap);
}
}
private void getVolumeMap(String bpid, ReplicasMap volumeMap)
throws IOException {
- for (FSVolume vol : volumes) {
- vol.getVolumeMap(bpid, volumeMap);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).getVolumeMap(bpid, volumeMap);
}
}
@@ -861,10 +861,10 @@ public class FSDataset implements FSData
ArrayList<FSVolume> removedVols = null;
// Make a copy of volumes for performing modification
- List<FSVolume> volumeList = new ArrayList<FSVolume>(getVolumes());
+ final List<FSVolumeInterface> volumeList = new ArrayList<FSVolumeInterface>(volumes);
for (int idx = 0; idx < volumeList.size(); idx++) {
- FSVolume fsv = volumeList.get(idx);
+ FSVolume fsv = (FSVolume)volumeList.get(idx);
try {
fsv.checkDirs();
} catch (DiskErrorException e) {
@@ -881,8 +881,8 @@ public class FSDataset implements FSData
// Remove null volumes from the volumes array
if (removedVols != null && removedVols.size() > 0) {
- List<FSVolume> newVols = new ArrayList<FSVolume>();
- for (FSVolume vol : volumeList) {
+ List<FSVolumeInterface> newVols = new ArrayList<FSVolumeInterface>();
+ for (FSVolumeInterface vol : volumeList) {
if (vol != null) {
newVols.add(vol);
}
@@ -895,44 +895,30 @@ public class FSDataset implements FSData
return removedVols;
}
-
+
+ @Override
public String toString() {
return volumes.toString();
}
- boolean isValid(FSVolume volume) {
- for (FSVolume vol : volumes) {
- if (vol == volume) {
- return true;
- }
- }
- return false;
- }
private void addBlockPool(String bpid, Configuration conf)
throws IOException {
- for (FSVolume v : volumes) {
- v.addBlockPool(bpid, conf);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).addBlockPool(bpid, conf);
}
}
private void removeBlockPool(String bpid) {
- for (FSVolume v : volumes) {
- v.shutdownBlockPool(bpid);
+ for (FSVolumeInterface v : volumes) {
+ ((FSVolume)v).shutdownBlockPool(bpid);
}
}
-
- /**
- * @return unmodifiable list of volumes
- */
- public List<FSVolume> getVolumes() {
- return volumes;
- }
private void shutdown() {
- for (FSVolume volume : volumes) {
+ for (FSVolumeInterface volume : volumes) {
if(volume != null) {
- volume.shutdown();
+ ((FSVolume)volume).shutdown();
}
}
}
@@ -944,36 +930,20 @@ public class FSDataset implements FSData
//
//////////////////////////////////////////////////////
- //Find better place?
- public static final String METADATA_EXTENSION = ".meta";
- public static final short METADATA_VERSION = 1;
- static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
-
private static boolean isUnlinkTmpFile(File f) {
String name = f.getName();
- return name.endsWith(UNLINK_BLOCK_SUFFIX);
- }
-
- static File getUnlinkTmpFile(File f) {
- return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
+ return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
private static File getOrigFile(File unlinkTmpFile) {
String fileName = unlinkTmpFile.getName();
return new File(unlinkTmpFile.getParentFile(),
- fileName.substring(0, fileName.length()-UNLINK_BLOCK_SUFFIX.length()));
- }
-
- static String getMetaFileName(String blockFileName, long genStamp) {
- return blockFileName + "_" + genStamp + METADATA_EXTENSION;
- }
-
- static File getMetaFile(File f , long genStamp) {
- return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
+ fileName.substring(0,
+ fileName.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length()));
}
protected File getMetaFile(ExtendedBlock b) throws IOException {
- return getMetaFile(getBlockFile(b), b.getGenerationStamp());
+ return DatanodeUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
}
/** Find the metadata file for the specified block file.
@@ -995,34 +965,13 @@ public class FSDataset implements FSData
" does not have a metafile!");
return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
-
- /** Find the corresponding meta data file from a given block file */
- private static File findMetaFile(final File blockFile) throws IOException {
- final String prefix = blockFile.getName() + "_";
- final File parent = blockFile.getParentFile();
- File[] matches = parent.listFiles(new FilenameFilter() {
- public boolean accept(File dir, String name) {
- return dir.equals(parent)
- && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
- }
- });
-
- if (matches == null || matches.length == 0) {
- throw new IOException("Meta file not found, blockFile=" + blockFile);
- }
- else if (matches.length > 1) {
- throw new IOException("Found more than one meta files: "
- + Arrays.asList(matches));
- }
- return matches[0];
- }
/** Find the corresponding meta data file from a given block file */
private static long parseGenerationStamp(File blockFile, File metaFile
) throws IOException {
String metaname = metaFile.getName();
String gs = metaname.substring(blockFile.getName().length() + 1,
- metaname.length() - METADATA_EXTENSION.length());
+ metaname.length() - DatanodeUtil.METADATA_EXTENSION.length());
try {
return Long.parseLong(gs);
} catch(NumberFormatException nfe) {
@@ -1031,19 +980,19 @@ public class FSDataset implements FSData
}
}
- /** Return the block file for the given ID */
- public File findBlockFile(String bpid, long blockId) {
- return getFile(bpid, blockId);
+ @Override // FSDatasetInterface
+ public List<FSVolumeInterface> getVolumes() {
+ return volumes.volumes;
}
@Override // FSDatasetInterface
public synchronized Block getStoredBlock(String bpid, long blkid)
throws IOException {
- File blockfile = findBlockFile(bpid, blkid);
+ File blockfile = getFile(bpid, blkid);
if (blockfile == null) {
return null;
}
- File metafile = findMetaFile(blockfile);
+ final File metafile = DatanodeUtil.findMetaFile(blockfile);
return new Block(blkid, blockfile.length(),
parseGenerationStamp(blockfile, metafile));
}
@@ -1091,26 +1040,6 @@ public class FSDataset implements FSData
return new MetaDataInputStream(new FileInputStream(checksumFile),
checksumFile.length());
}
-
- static File createTmpFile(Block b, File f) throws IOException {
- if (f.exists()) {
- throw new IOException("Unexpected problem in creating temporary file for "+
- b + ". File " + f + " should not be present, but is.");
- }
- // Create the zero-length temp file
- //
- boolean fileCreated = false;
- try {
- fileCreated = f.createNewFile();
- } catch (IOException ioe) {
- throw (IOException)new IOException(DISK_ERROR +f).initCause(ioe);
- }
- if (!fileCreated) {
- throw new IOException("Unexpected problem in creating temporary file for "+
- b + ". File " + f + " should be creatable, but is already present.");
- }
- return f;
- }
private final DataNode datanode;
final FSVolumeSet volumes;
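
[Editor's note: the helpers deleted above (createTmpFile, the meta-file name utilities, getUnlinkTmpFile, findMetaFile) now live in DatanodeUtil, which this commit references but does not show. A sketch of that class, reconstructed from the removed bodies; the actual DatanodeUtil in r1242635 may differ in visibility and exact membership:]

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.File;
    import java.io.FilenameFilter;
    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.hadoop.hdfs.protocol.Block;

    /** Sketch of the helper class the hunks above now call into. */
    class DatanodeUtil {
      static final String METADATA_EXTENSION = ".meta";
      static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
      private static final String DISK_ERROR =
          "Possible disk error on file creation: ";

      /** Create the zero-length temporary file f for block b. */
      static File createTmpFile(Block b, File f) throws IOException {
        if (f.exists()) {
          throw new IOException("Unexpected problem in creating temporary file for "
              + b + ". File " + f + " should not be present, but is.");
        }
        boolean fileCreated = false;
        try {
          fileCreated = f.createNewFile();
        } catch (IOException ioe) {
          throw (IOException) new IOException(DISK_ERROR + f).initCause(ioe);
        }
        if (!fileCreated) {
          throw new IOException("Unexpected problem in creating temporary file for "
              + b + ". File " + f + " should be creatable, but is already present.");
        }
        return f;
      }

      static String getMetaFileName(String blockFileName, long genStamp) {
        return blockFileName + "_" + genStamp + METADATA_EXTENSION;
      }

      static File getMetaFile(File f, long genStamp) {
        return new File(getMetaFileName(f.getAbsolutePath(), genStamp));
      }

      static File getUnlinkTmpFile(File f) {
        return new File(f.getParentFile(), f.getName() + UNLINK_BLOCK_SUFFIX);
      }

      /** Find the unique meta file corresponding to the given block file. */
      static File findMetaFile(final File blockFile) throws IOException {
        final String prefix = blockFile.getName() + "_";
        final File parent = blockFile.getParentFile();
        final File[] matches = parent.listFiles(new FilenameFilter() {
          public boolean accept(File dir, String name) {
            return dir.equals(parent)
                && name.startsWith(prefix) && name.endsWith(METADATA_EXTENSION);
          }
        });
        if (matches == null || matches.length == 0) {
          throw new IOException("Meta file not found, blockFile=" + blockFile);
        } else if (matches.length > 1) {
          throw new IOException("Found more than one meta files: "
              + Arrays.asList(matches));
        }
        return matches[0];
      }
    }
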
@@ -1127,7 +1056,7 @@ public class FSDataset implements FSData
/**
* An FSDataset has a directory where it loads its data files.
*/
- public FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
+ FSDataset(DataNode datanode, DataStorage storage, Configuration conf)
throws IOException {
this.datanode = datanode;
this.maxBlocksPerDir =
@@ -1160,12 +1089,12 @@ public class FSDataset implements FSData
+ ", volume failures tolerated: " + volFailuresTolerated);
}
- FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
+ final List<FSVolumeInterface> volArray = new ArrayList<FSVolumeInterface>(
+ storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
- volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(),
- conf);
- DataNode.LOG.info("FSDataset added volume - "
- + storage.getStorageDir(idx).getCurrentDir());
+ final File dir = storage.getStorageDir(idx).getCurrentDir();
+ volArray.add(new FSVolume(dir, conf));
+ DataNode.LOG.info("FSDataset added volume - " + dir);
}
volumeMap = new ReplicasMap(this);
@@ -1211,7 +1140,7 @@ public class FSDataset implements FSData
*/
@Override // FSDatasetInterface
public boolean hasEnoughResource() {
- return volumes.numberOfVolumes() >= validVolsRequired;
+ return getVolumes().size() >= validVolsRequired;
}
/**
@@ -1252,15 +1181,14 @@ public class FSDataset implements FSData
/**
* Get File name for a given block.
*/
- public File getBlockFile(ExtendedBlock b) throws IOException {
+ private File getBlockFile(ExtendedBlock b) throws IOException {
return getBlockFile(b.getBlockPoolId(), b.getLocalBlock());
}
/**
* Get File name for a given block.
*/
- public File getBlockFile(String bpid, Block b)
- throws IOException {
+ File getBlockFile(String bpid, Block b) throws IOException {
File f = validateBlockFile(bpid, b);
if(f == null) {
if (DataNode.LOG.isDebugEnabled()) {
@@ -1291,7 +1219,10 @@ public class FSDataset implements FSData
*/
private File getBlockFileNoExistsCheck(ExtendedBlock b)
throws IOException {
- File f = getFile(b.getBlockPoolId(), b.getLocalBlock());
+ final File f;
+ synchronized(this) {
+ f = getFile(b.getBlockPoolId(), b.getLocalBlock().getBlockId());
+ }
if (f == null) {
throw new IOException("Block " + b + " is not valid");
}
@@ -1324,7 +1255,7 @@ public class FSDataset implements FSData
* @throws ReplicaNotFoundException if no entry is in the map or
* there is a generation stamp mismatch
*/
- private ReplicaInfo getReplicaInfo(ExtendedBlock b)
+ ReplicaInfo getReplicaInfo(ExtendedBlock b)
throws ReplicaNotFoundException {
ReplicaInfo info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
if (info == null) {
@@ -1392,8 +1323,8 @@ public class FSDataset implements FSData
private static File moveBlockFiles(Block b, File srcfile, File destdir
) throws IOException {
final File dstfile = new File(destdir, b.getBlockName());
- final File srcmeta = getMetaFile(srcfile, b.getGenerationStamp());
- final File dstmeta = getMetaFile(dstfile, b.getGenerationStamp());
+ final File srcmeta = DatanodeUtil.getMetaFile(srcfile, b.getGenerationStamp());
+ final File dstmeta = DatanodeUtil.getMetaFile(dstfile, b.getGenerationStamp());
if (!srcmeta.renameTo(dstmeta)) {
throw new IOException("Failed to move meta file for " + b
+ " from " + srcmeta + " to " + dstmeta);
@@ -1460,19 +1391,6 @@ public class FSDataset implements FSData
}
}
- private final static String DISK_ERROR = "Possible disk error on file creation: ";
- /** Get the cause of an I/O exception if caused by a possible disk error
- * @param ioe an I/O exception
- * @return cause if the I/O exception is caused by a possible disk error;
- * null otherwise.
- */
- static IOException getCauseIfDiskError(IOException ioe) {
- if (ioe.getMessage()!=null && ioe.getMessage().startsWith(DISK_ERROR)) {
- return (IOException)ioe.getCause();
- } else {
- return null;
- }
- }
@Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,
@@ -1524,7 +1442,7 @@ public class FSDataset implements FSData
// construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile();
- FSVolume v = replicaInfo.getVolume();
+ FSVolume v = (FSVolume)replicaInfo.getVolume();
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo);
@@ -1781,7 +1699,7 @@ public class FSDataset implements FSData
+ visible + ", temp=" + temp);
}
// check volume
- final FSVolume v = temp.getVolume();
+ final FSVolume v = (FSVolume)temp.getVolume();
if (v == null) {
throw new IOException("r.getVolume() = null, temp=" + temp);
}
@@ -1842,7 +1760,7 @@ public class FSDataset implements FSData
if ( vol == null ) {
ReplicaInfo replica = volumeMap.get(bpid, blk);
if (replica != null) {
- vol = volumeMap.get(bpid, blk).getVolume();
+ vol = (FSVolume)volumeMap.get(bpid, blk).getVolume();
}
if ( vol == null ) {
throw new IOException("Could not find volume for block " + blk);
@@ -1882,7 +1800,7 @@ public class FSDataset implements FSData
newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
} else {
- FSVolume v = replicaInfo.getVolume();
+ FSVolume v = (FSVolume)replicaInfo.getVolume();
File f = replicaInfo.getBlockFile();
if (v == null) {
throw new IOException("No volume for temporary file " + f +
@@ -1980,7 +1898,8 @@ public class FSDataset implements FSData
/**
* Get the list of finalized blocks from in-memory blockmap for a block pool.
*/
- synchronized List<Block> getFinalizedBlocks(String bpid) {
+ @Override
+ public synchronized List<Block> getFinalizedBlocks(String bpid) {
ArrayList<Block> finalized = new ArrayList<Block>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
if(b.getState() == ReplicaState.FINALIZED) {
@@ -2021,7 +1940,10 @@ public class FSDataset implements FSData
*/
File validateBlockFile(String bpid, Block b) {
//Should we check for metadata file too?
- File f = getFile(bpid, b);
+ final File f;
+ synchronized(this) {
+ f = getFile(bpid, b.getBlockId());
+ }
if(f != null ) {
if(f.exists())
@@ -2050,7 +1972,7 @@ public class FSDataset implements FSData
}
//check replica's meta file
- final File metafile = getMetaFile(f, r.getGenerationStamp());
+ final File metafile = DatanodeUtil.getMetaFile(f, r.getGenerationStamp());
if (!metafile.exists()) {
throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
}
@@ -2071,7 +1993,7 @@ public class FSDataset implements FSData
File f = null;
FSVolume v;
synchronized (this) {
- f = getFile(bpid, invalidBlks[i]);
+ f = getFile(bpid, invalidBlks[i].getBlockId());
ReplicaInfo dinfo = volumeMap.get(bpid, invalidBlks[i]);
if (dinfo == null ||
dinfo.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
@@ -2081,7 +2003,7 @@ public class FSDataset implements FSData
error = true;
continue;
}
- v = dinfo.getVolume();
+ v = (FSVolume)dinfo.getVolume();
if (f == null) {
DataNode.LOG.warn("Unexpected error trying to delete block "
+ invalidBlks[i] +
@@ -2115,7 +2037,7 @@ public class FSDataset implements FSData
}
volumeMap.remove(bpid, invalidBlks[i]);
}
- File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
+ File metaFile = DatanodeUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp());
// Delete the block asynchronously to make sure we can do it fast enough
asyncDiskService.deleteAsync(v, f, metaFile,
@@ -2130,11 +2052,10 @@ public class FSDataset implements FSData
datanode.notifyNamenodeDeletedBlock(block);
}
- /**
- * Turn the block identifier into a filename; ignore generation stamp!!!
- */
- public synchronized File getFile(String bpid, Block b) {
- return getFile(bpid, b.getBlockId());
+ @Override // {@link FSDatasetInterface}
+ public synchronized boolean contains(final ExtendedBlock block) {
+ final long blockId = block.getLocalBlock().getBlockId();
+ return getFile(block.getBlockPoolId(), blockId) != null;
}
/**
@@ -2143,7 +2064,7 @@ public class FSDataset implements FSData
* @param blockId a block's id
* @return on disk data file path; null if the replica does not exist
*/
- private File getFile(String bpid, long blockId) {
+ File getFile(final String bpid, final long blockId) {
ReplicaInfo info = volumeMap.get(bpid, blockId);
if (info != null) {
return info.getBlockFile();
@@ -2273,8 +2194,9 @@ public class FSDataset implements FSData
* @param diskMetaFile Metadata file from on the disk
* @param vol Volume of the block file
*/
+ @Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
- File diskMetaFile, FSVolume vol) {
+ File diskMetaFile, FSVolumeInterface vol) {
Block corruptBlock = null;
ReplicaInfo memBlockInfo;
synchronized (this) {
@@ -2362,7 +2284,7 @@ public class FSDataset implements FSData
// Compare generation stamp
if (memBlockInfo.getGenerationStamp() != diskGS) {
- File memMetaFile = getMetaFile(diskFile,
+ File memMetaFile = DatanodeUtil.getMetaFile(diskFile,
memBlockInfo.getGenerationStamp());
if (memMetaFile.exists()) {
if (memMetaFile.compareTo(diskMetaFile) != 0) {
@@ -2597,18 +2519,15 @@ public class FSDataset implements FSData
volumes.removeBlockPool(bpid);
}
- /**
- * get list of all bpids
- * @return list of bpids
- */
- public String [] getBPIdlist() {
+ @Override
+ public String[] getBlockPoolList() {
return volumeMap.getBlockPoolList();
}
/**
* Class for representing the Datanode volume information
*/
- static class VolumeInfo {
+ private static class VolumeInfo {
final String directory;
final long usedSpace;
final long freeSpace;
@@ -2621,10 +2540,11 @@ public class FSDataset implements FSData
this.reservedSpace = reservedSpace;
}
}
-
- Collection<VolumeInfo> getVolumeInfo() {
+
+ private Collection<VolumeInfo> getVolumeInfo() {
Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
- for (FSVolume volume : volumes.volumes) {
+ for (FSVolumeInterface v : volumes.volumes) {
+ final FSVolume volume = (FSVolume)v;
long used = 0;
long free = 0;
try {
@@ -2641,13 +2561,27 @@ public class FSDataset implements FSData
}
return info;
}
-
+
+ @Override
+ public Map<String, Object> getVolumeInfoMap() {
+ final Map<String, Object> info = new HashMap<String, Object>();
+ Collection<VolumeInfo> volumes = getVolumeInfo();
+ for (VolumeInfo v : volumes) {
+ final Map<String, Object> innerInfo = new HashMap<String, Object>();
+ innerInfo.put("usedSpace", v.usedSpace);
+ innerInfo.put("freeSpace", v.freeSpace);
+ innerInfo.put("reservedSpace", v.reservedSpace);
+ info.put(v.directory, innerInfo);
+ }
+ return info;
+ }
+
@Override //FSDatasetInterface
public synchronized void deleteBlockPool(String bpid, boolean force)
throws IOException {
if (!force) {
- for (FSVolume volume : volumes.volumes) {
- if (!volume.isBPDirEmpty(bpid)) {
+ for (FSVolumeInterface volume : volumes.volumes) {
+ if (!((FSVolume)volume).isBPDirEmpty(bpid)) {
DataNode.LOG.warn(bpid
+ " has some block files, cannot delete unless forced");
throw new IOException("Cannot delete block pool, "
@@ -2655,8 +2589,8 @@ public class FSDataset implements FSData
}
}
}
- for (FSVolume volume : volumes.volumes) {
- volume.deleteBPDirectories(bpid, force);
+ for (FSVolumeInterface volume : volumes.volumes) {
+ ((FSVolume)volume).deleteBPDirectories(bpid, force);
}
}
@@ -2664,7 +2598,7 @@ public class FSDataset implements FSData
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
File datafile = getBlockFile(block);
- File metafile = getMetaFile(datafile, block.getGenerationStamp());
+ File metafile = DatanodeUtil.getMetaFile(datafile, block.getGenerationStamp());
BlockLocalPathInfo info = new BlockLocalPathInfo(block,
datafile.getAbsolutePath(), metafile.getAbsolutePath());
return info;
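
[Editor's note: getVolumeInfoMap() above flattens the now-private VolumeInfo objects into nested maps keyed by volume directory, with "usedSpace", "freeSpace", and "reservedSpace" entries. A hypothetical consumer, e.g. something JMX-facing, would read it like this; the cast reflects the Map<String, Object> value type:]

    // Hypothetical consumer of the map shape introduced above.
    Map<String, Object> volumeInfo = dataset.getVolumeInfoMap();
    for (Map.Entry<String, Object> e : volumeInfo.entrySet()) {
      @SuppressWarnings("unchecked")
      final Map<String, Object> inner = (Map<String, Object>) e.getValue();
      System.out.println(e.getKey()
          + ": used=" + inner.get("usedSpace")
          + ", free=" + inner.get("freeSpace")
          + ", reserved=" + inner.get("reservedSpace"));
    }
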
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java Fri Feb 10 01:49:08 2012
@@ -24,16 +24,18 @@ import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -47,8 +49,44 @@ import org.apache.hadoop.util.DiskChecke
*/
@InterfaceAudience.Private
public interface FSDatasetInterface extends FSDatasetMBean {
-
-
+ /**
+ * This is an interface for the underlying volume.
+ * @see org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume
+ */
+ interface FSVolumeInterface {
+ /** @return a list of block pools. */
+ public String[] getBlockPoolList();
+
+ /** @return the available storage space in bytes. */
+ public long getAvailable() throws IOException;
+
+ /** @return the directory for the block pool. */
+ public File getDirectory(String bpid) throws IOException;
+
+ /** @return the directory for the finalized blocks in the block pool. */
+ public File getFinalizedDir(String bpid) throws IOException;
+ }
+
+ /** @return a list of volumes. */
+ public List<FSVolumeInterface> getVolumes();
+
+ /** @return a volume information map (name => info). */
+ public Map<String, Object> getVolumeInfoMap();
+
+ /** @return a list of block pools. */
+ public String[] getBlockPoolList();
+
+ /** @return a list of finalized blocks for the given block pool. */
+ public List<Block> getFinalizedBlocks(String bpid);
+
+ /**
+ * Check whether the in-memory block record matches the block on the disk,
+ * and, in case that they are not matched, update the record or mark it
+ * as corrupted.
+ */
+ public void checkAndUpdate(String bpid, long blockId, File diskFile,
+ File diskMetaFile, FSVolumeInterface vol);
+
/**
* Returns the length of the metadata file of the specified block
* @param b - the block for which the metadata length is desired
@@ -303,6 +341,9 @@ public interface FSDatasetInterface exte
*/
public BlockListAsLongs getBlockReport(String bpid);
+ /** Does the dataset contain the block? */
+ public boolean contains(ExtendedBlock block);
+
/**
* Is the block valid?
* @param b
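
[Editor's note: taken together, the new interface methods let dataset-agnostic code walk volumes without ever touching FSDataset.FSVolume. A minimal sketch against the names declared above; the helper method itself is hypothetical:]

    // Sum free space across all volumes through the narrow interface.
    static long totalAvailable(FSDatasetInterface dataset) throws IOException {
      long total = 0L;
      for (FSDatasetInterface.FSVolumeInterface v : dataset.getVolumes()) {
        total += v.getAvailable();
      }
      return total;
    }
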
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java Fri Feb 10 01:49:08 2012
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* This class describes a replica that has been finalized.
@@ -38,7 +38,7 @@ class FinalizedReplica extends ReplicaIn
* @param dir directory path where block and meta files are located
*/
FinalizedReplica(long blockId, long len, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
@@ -48,7 +48,7 @@ class FinalizedReplica extends ReplicaIn
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- FinalizedReplica(Block block, FSVolume vol, File dir) {
+ FinalizedReplica(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir);
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java Fri Feb 10 01:49:08 2012
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/** This class represents replicas being written.
* Those are the replicas that
@@ -36,7 +36,7 @@ class ReplicaBeingWritten extends Replic
* @param dir directory path where block and meta files are located
*/
ReplicaBeingWritten(long blockId, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super( blockId, genStamp, vol, dir);
}
@@ -48,7 +48,7 @@ class ReplicaBeingWritten extends Replic
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(Block block,
- FSVolume vol, File dir, Thread writer) {
+ FSVolumeInterface vol, File dir, Thread writer) {
super( block, vol, dir, writer);
}
@@ -62,7 +62,7 @@ class ReplicaBeingWritten extends Replic
* @param writer a thread that is writing to this replica
*/
ReplicaBeingWritten(long blockId, long len, long genStamp,
- FSVolume vol, File dir, Thread writer ) {
+ FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir, writer);
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java Fri Feb 10 01:49:08 2012
@@ -24,8 +24,8 @@ import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
@@ -53,7 +53,7 @@ class ReplicaInPipeline extends ReplicaI
* @param state replica state
*/
ReplicaInPipeline(long blockId, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
}
@@ -65,7 +65,7 @@ class ReplicaInPipeline extends ReplicaI
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(Block block,
- FSVolume vol, File dir, Thread writer) {
+ FSVolumeInterface vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer);
}
@@ -80,7 +80,7 @@ class ReplicaInPipeline extends ReplicaI
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(long blockId, long len, long genStamp,
- FSVolume vol, File dir, Thread writer ) {
+ FSVolumeInterface vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len;
this.bytesOnDisk = len;
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java Fri Feb 10 01:49:08 2012
@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.io.IOUtils;
/**
@@ -35,8 +35,10 @@ import org.apache.hadoop.io.IOUtils;
*/
@InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica {
- private FSVolume volume; // volume where the replica belongs
- private File dir; // directory where block & meta files belong
+ /** volume where the replica belongs */
+ private FSVolumeInterface volume;
+ /** directory where block & meta files belong */
+ private File dir;
/**
* Constructor for a zero length replica
@@ -45,7 +47,7 @@ abstract public class ReplicaInfo extend
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- ReplicaInfo(long blockId, long genStamp, FSVolume vol, File dir) {
+ ReplicaInfo(long blockId, long genStamp, FSVolumeInterface vol, File dir) {
this( blockId, 0L, genStamp, vol, dir);
}
@@ -55,7 +57,7 @@ abstract public class ReplicaInfo extend
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- ReplicaInfo(Block block, FSVolume vol, File dir) {
+ ReplicaInfo(Block block, FSVolumeInterface vol, File dir) {
this(block.getBlockId(), block.getNumBytes(),
block.getGenerationStamp(), vol, dir);
}
@@ -69,7 +71,7 @@ abstract public class ReplicaInfo extend
* @param dir directory path where block and meta files are located
*/
ReplicaInfo(long blockId, long len, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp);
this.volume = vol;
this.dir = dir;
@@ -111,14 +113,14 @@ abstract public class ReplicaInfo extend
* Get the volume where this replica is located on disk
* @return the volume where this replica is located on disk
*/
- FSVolume getVolume() {
+ FSVolumeInterface getVolume() {
return volume;
}
/**
* Set the volume where this replica is located on disk
*/
- void setVolume(FSVolume vol) {
+ void setVolume(FSVolumeInterface vol) {
this.volume = vol;
}
@@ -162,7 +164,7 @@ abstract public class ReplicaInfo extend
* be recovered (especially on Windows) on datanode restart.
*/
private void unlinkFile(File file, Block b) throws IOException {
- File tmpFile = FSDataset.createTmpFile(b, FSDataset.getUnlinkTmpFile(file));
+ File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
try {
FileInputStream in = new FileInputStream(file);
try {
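
[Editor's note: ReplicaInfo now stores and hands back the narrow FSVolumeInterface, and code that needs FSDataset-specific behavior casts back to the concrete type, as the FSDataset hunks above do repeatedly. The recurring shape, as a sketch:]

    // Narrow the interface back to the concrete volume where
    // implementation-specific state is needed.
    final FSVolume v = (FSVolume) replicaInfo.getVolume();
    if (v == null) {
      throw new IOException("r.getVolume() = null, temp=" + replicaInfo);
    }
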
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java Fri Feb 10 01:49:08 2012
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
import java.io.File;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
/**
@@ -145,7 +145,7 @@ class ReplicaUnderRecovery extends Repli
}
@Override //ReplicaInfo
- void setVolume(FSVolume vol) {
+ void setVolume(FSVolumeInterface vol) {
super.setVolume(vol);
original.setVolume(vol);
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java Fri Feb 10 01:49:08 2012
@@ -21,7 +21,7 @@ import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
/**
* This class represents a replica that is waiting to be recovered.
@@ -44,7 +44,7 @@ class ReplicaWaitingToBeRecovered extend
* @param dir directory path where block and meta files are located
*/
ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
- FSVolume vol, File dir) {
+ FSVolumeInterface vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
@@ -54,7 +54,7 @@ class ReplicaWaitingToBeRecovered extend
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
- ReplicaWaitingToBeRecovered(Block block, FSVolume vol, File dir) {
+ ReplicaWaitingToBeRecovered(Block block, FSVolumeInterface vol, File dir) {
super(block, vol, dir);
}
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java Fri Feb 10 01:49:08 2012
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.da
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
public class RoundRobinVolumesPolicy implements BlockVolumeChoosingPolicy {
@@ -28,8 +28,8 @@ public class RoundRobinVolumesPolicy imp
private int curVolume = 0;
@Override
- public synchronized FSVolume chooseVolume(List<FSVolume> volumes, long blockSize)
- throws IOException {
+ public synchronized FSVolumeInterface chooseVolume(
+ List<FSVolumeInterface> volumes, long blockSize) throws IOException {
if(volumes.size() < 1) {
throw new DiskOutOfSpaceException("No more available volumes");
}
@@ -44,7 +44,7 @@ public class RoundRobinVolumesPolicy imp
long maxAvailable = 0;
while (true) {
- FSVolume volume = volumes.get(curVolume);
+ FSVolumeInterface volume = volumes.get(curVolume);
curVolume = (curVolume + 1) % volumes.size();
long availableVolumeSize = volume.getAvailable();
if (availableVolumeSize > blockSize) { return volume; }
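
[Editor's note: with chooseVolume() retyped to List<FSVolumeInterface>, the policy no longer depends on FSDataset at all; any FSDatasetInterface implementation can feed it. Hypothetical usage, with the variable values assumed:]

    // Pick the next volume with room for a block of the given size.
    BlockVolumeChoosingPolicy policy = new RoundRobinVolumesPolicy();
    List<FSVolumeInterface> vols = dataset.getVolumes();
    long blockSize = 64L * 1024 * 1024;  // hypothetical block size
    FSVolumeInterface chosen = policy.chooseVolume(vols, blockSize);
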
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Feb 10 01:49:08 2012
@@ -266,16 +266,26 @@ public class FSDirectory implements Clos
short replication,
long modificationTime,
long atime,
- long preferredBlockSize)
+ long preferredBlockSize,
+ boolean underConstruction,
+ String clientName,
+ String clientMachine)
throws UnresolvedLinkException {
INode newNode;
assert hasWriteLock();
- newNode = new INodeFile(permissions, new BlockInfo[0], replication,
- modificationTime, atime, preferredBlockSize);
- writeLock();
+ if (underConstruction) {
+ newNode = new INodeFileUnderConstruction(
+ permissions, replication,
+ preferredBlockSize, modificationTime, clientName,
+ clientMachine, null);
+ } else {
+ newNode = new INodeFile(permissions, 0, replication,
+ modificationTime, atime, preferredBlockSize);
+ }
+ writeLock(); // TODO: this is silly, considering the assert above!
try {
try {
- newNode = addNode(path, newNode, 0);
+ newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE);
} catch (IOException e) {
return null;
}
@@ -416,8 +426,8 @@ public class FSDirectory implements Clos
return true;
}
- void unprotectedRemoveBlock(String path,
- INodeFileUnderConstruction fileNode, Block block) throws IOException {
+ void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode,
+ Block block) throws IOException {
// modify file-> block and blocksMap
fileNode.removeLastBlock(block);
getBlockManager().removeBlockFromMap(block);
@@ -430,8 +440,8 @@ public class FSDirectory implements Clos
// update space consumed
INode[] pathINodes = getExistingPathINodes(path);
- updateCount(pathINodes, pathINodes.length - 1, 0,
- - fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+ updateCount(pathINodes, pathINodes.length-1, 0,
+ -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
}
/**
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Feb 10 01:49:08 2012
@@ -179,10 +179,15 @@ public class FSEditLogLoader {
" clientHolder " + addCloseOp.clientName +
" clientMachine " + addCloseOp.clientMachine);
}
+ // There are three cases here:
+ // 1. OP_ADD to create a new file
+ // 2. OP_ADD to update file blocks
+ // 3. OP_ADD to open file for append
// See if the file already exists (persistBlocks call)
INodeFile oldFile = getINodeFile(fsDir, addCloseOp.path);
- if (oldFile == null) { // this is OP_ADD on a new file
+ INodeFile newFile = oldFile;
+ if (oldFile == null) { // this is OP_ADD on a new file (case 1)
// versions > 0 support per file replication
// get name and replication
final short replication = fsNamesys.getBlockManager(
@@ -192,6 +197,12 @@ public class FSEditLogLoader {
permissions = addCloseOp.permissions;
}
long blockSize = addCloseOp.blockSize;
+
+ // Versions of HDFS prior to 0.17 may log an OP_ADD transaction
+ // which includes blocks in it. When we update the minimum
+ // upgrade version to something more recent than 0.17, we can
+ // simplify this code by asserting that OP_ADD transactions
+ // don't have any blocks.
// Older versions of HDFS do not store the block size in inode.
// If the file has more than one block, use the size of the
@@ -207,20 +218,17 @@ public class FSEditLogLoader {
}
}
- // TODO: We should do away with this add-then-replace dance.
-
// add to the file tree
- INodeFile node = (INodeFile)fsDir.unprotectedAddFile(
+ newFile = (INodeFile)fsDir.unprotectedAddFile(
addCloseOp.path, permissions,
replication, addCloseOp.mtime,
- addCloseOp.atime, blockSize);
+ addCloseOp.atime, blockSize,
+ true, addCloseOp.clientName, addCloseOp.clientMachine);
+ fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
- fsNamesys.prepareFileForWrite(addCloseOp.path, node,
- addCloseOp.clientName, addCloseOp.clientMachine, null,
- false);
} else { // This is OP_ADD on an existing file
if (!oldFile.isUnderConstruction()) {
- // This is a call to append() on an already-closed file.
+ // This is case 3: a call to append() on an already-closed file.
if (FSNamesystem.LOG.isDebugEnabled()) {
FSNamesystem.LOG.debug("Reopening an already-closed file " +
"for append");
@@ -228,11 +236,13 @@ public class FSEditLogLoader {
fsNamesys.prepareFileForWrite(addCloseOp.path, oldFile,
addCloseOp.clientName, addCloseOp.clientMachine, null,
false);
- oldFile = getINodeFile(fsDir, addCloseOp.path);
+ newFile = getINodeFile(fsDir, addCloseOp.path);
}
-
- updateBlocks(fsDir, addCloseOp, oldFile);
}
+ // Fall-through for case 2.
+ // Regardless of whether it's a new file or an updated file,
+ // update the block list.
+ updateBlocks(fsDir, addCloseOp, newFile);
break;
}
case OP_CLOSE: {
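
[Editor's note: condensed, the restructured OP_ADD handling reads as below; all three cases now converge on a single updateBlocks() call instead of the old add-then-replace dance. A sketch assembled from the hunks above, not verbatim source; permissions, replication, and blockSize are derived earlier in the real method:]

    INodeFile oldFile = getINodeFile(fsDir, addCloseOp.path);
    INodeFile newFile = oldFile;
    if (oldFile == null) {
      // Case 1: new file, created directly under construction, with a lease.
      newFile = (INodeFile) fsDir.unprotectedAddFile(addCloseOp.path,
          permissions, replication, addCloseOp.mtime, addCloseOp.atime,
          blockSize, true, addCloseOp.clientName, addCloseOp.clientMachine);
      fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
    } else if (!oldFile.isUnderConstruction()) {
      // Case 3: append() reopening an already-closed file.
      fsNamesys.prepareFileForWrite(addCloseOp.path, oldFile,
          addCloseOp.clientName, addCloseOp.clientMachine, null, false);
      newFile = getINodeFile(fsDir, addCloseOp.path);
    }
    // Case 2 (block update on an open file) falls straight through.
    updateBlocks(fsDir, addCloseOp, newFile);
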
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Feb 10 01:49:08 2012
@@ -4528,7 +4528,12 @@ public class FSNamesystem implements Nam
if (destinationExisted && dinfo.isDir()) {
Path spath = new Path(src);
- overwrite = spath.getParent().toString() + Path.SEPARATOR;
+ Path parent = spath.getParent();
+ if (isRoot(parent)) {
+ overwrite = parent.toString();
+ } else {
+ overwrite = parent.toString() + Path.SEPARATOR;
+ }
replaceBy = dst + Path.SEPARATOR;
} else {
overwrite = src;
@@ -4538,6 +4543,10 @@ public class FSNamesystem implements Nam
leaseManager.changeLease(src, dst, overwrite, replaceBy);
}
+ private boolean isRoot(Path path) {
+ return path.getParent() == null;
+ }
+
/**
* Serializes leases.
*/
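
[Editor's note: the guard matters because Path.getParent() on a path directly under the root returns "/", and unconditionally appending Path.SEPARATOR produced "//", a prefix that no lease path ever starts with, so those leases were silently skipped during rename. A worked illustration with hypothetical paths:]

    // Hypothetical rename where src = "/dir1" and dst is an existing directory:
    Path spath  = new Path("/dir1");
    Path parent = spath.getParent();   // the root "/"; parent.getParent() == null
    // old: overwrite = parent + Path.SEPARATOR -> "//"  (never a lease prefix)
    // new: overwrite = parent.toString()       -> "/"   (matches "/dir1/..." leases)
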
@@ -4940,7 +4949,7 @@ public class FSNamesystem implements Nam
*/
@Override // NameNodeMXBean
public String getVersion() {
- return VersionInfo.getVersion();
+ return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
}
@Override // NameNodeMXBean
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Fri Feb 10 01:49:08 2012
@@ -43,8 +43,8 @@ public class INodeFileUnderConstruction
String clientName,
String clientMachine,
DatanodeDescriptor clientNode) {
- super(permissions.applyUMask(UMASK), 0, replication, modTime, modTime,
- preferredBlockSize);
+ super(permissions.applyUMask(UMASK), 0, replication,
+ modTime, modTime, preferredBlockSize);
this.clientName = clientName;
this.clientMachine = clientMachine;
this.clientNode = clientNode;
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Fri Feb 10 01:49:08 2012
@@ -92,6 +92,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -100,8 +101,10 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
@@ -156,8 +159,8 @@ class NameNodeRpcServer implements Namen
this.metrics = NameNode.getNameNodeMetrics();
int handlerCount =
- conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
- DFS_DATANODE_HANDLER_COUNT_DEFAULT);
+ conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
+ DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
ProtobufRpcEngine.class);
@@ -855,8 +858,8 @@ class NameNodeRpcServer implements Namen
@Override // DatanodeProtocol
- public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
- throws IOException {
+ public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg,
+ DatanodeStorage[] storages) throws IOException {
verifyVersion(nodeReg.getVersion());
namesystem.registerDatanode(nodeReg);
@@ -865,19 +868,20 @@ class NameNodeRpcServer implements Namen
@Override // DatanodeProtocol
public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg,
- long capacity, long dfsUsed, long remaining, long blockPoolUsed,
- int xmitsInProgress, int xceiverCount, int failedVolumes)
- throws IOException {
+ StorageReport[] report, int xmitsInProgress, int xceiverCount,
+ int failedVolumes) throws IOException {
verifyRequest(nodeReg);
- return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining,
- blockPoolUsed, xceiverCount, xmitsInProgress, failedVolumes);
+ return namesystem.handleHeartbeat(nodeReg, report[0].getCapacity(),
+ report[0].getDfsUsed(), report[0].getRemaining(),
+ report[0].getBlockPoolUsed(), xceiverCount, xmitsInProgress,
+ failedVolumes);
}
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
- String poolId, long[] blocks) throws IOException {
+ String poolId, StorageBlockReport[] reports) throws IOException {
verifyRequest(nodeReg);
- BlockListAsLongs blist = new BlockListAsLongs(blocks);
+ BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+ "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
@@ -892,7 +896,7 @@ class NameNodeRpcServer implements Namen
@Override // DatanodeProtocol
public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
- ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks) throws IOException {
+ StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
verifyRequest(nodeReg);
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
@@ -900,7 +904,7 @@ class NameNodeRpcServer implements Namen
+" blocks.");
}
namesystem.getBlockManager().processIncrementalBlockReport(
- nodeReg, poolId, receivedAndDeletedBlocks);
+ nodeReg, poolId, receivedAndDeletedBlocks[0].getBlocks());
}
@Override // DatanodeProtocol
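
The sendHeartbeat hunk above consumes only report[0], reflecting the
single-storage-per-datanode assumption at this stage of the branch. A
hedged sketch of aggregating across storages once several are reported,
assuming only the StorageReport getters visible in this diff:

    // Sketch only: sums per-storage utilization via the accessors shown
    // above (getCapacity, getDfsUsed, getRemaining, getBlockPoolUsed);
    // no other StorageReport API is assumed.
    static long[] aggregate(StorageReport[] reports) {
      long capacity = 0, dfsUsed = 0, remaining = 0, blockPoolUsed = 0;
      for (StorageReport r : reports) {
        capacity += r.getCapacity();
        dfsUsed += r.getDfsUsed();
        remaining += r.getRemaining();
        blockPoolUsed += r.getBlockPoolUsed();
      }
      return new long[] {capacity, dfsUsed, remaining, blockPoolUsed};
    }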
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Fri Feb 10 01:49:08 2012
@@ -25,8 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
-import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@@ -48,10 +46,11 @@ public interface DatanodeProtocol extend
* to insulate from the protocol serialization.
*
* If you are adding/changing DN's interface then you need to
- * change both this class and ALSO
- * {@link DatanodeWireProtocol}.
- * These changes need to be done in a compatible fashion as described in
- * {@link ClientNamenodeWireProtocol}
+ * change both this class and ALSO the related protocol buffer
+ * wire protocol definition in DatanodeProtocol.proto.
+ *
+ * For more details on the protocol buffer wire protocol, please see
+ * .../org/apache/hadoop/hdfs/protocolPB/overview.html
*/
public static final long versionID = 28L;
@@ -81,13 +80,16 @@ public interface DatanodeProtocol extend
* Register Datanode.
*
* @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
- *
+ * @param registration datanode registration information
+ * @param storages list of storages on the datanode
* @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
* new storageID if the datanode did not have one and
* registration ID for further communication.
*/
- public DatanodeRegistration registerDatanode(DatanodeRegistration registration
- ) throws IOException;
+ public DatanodeRegistration registerDatanode(
+ DatanodeRegistration registration, DatanodeStorage[] storages)
+ throws IOException;
+
/**
* sendHeartbeat() tells the NameNode that the DataNode is still
* alive and well. Includes some status info, too.
@@ -96,19 +98,14 @@ public interface DatanodeProtocol extend
* A DatanodeCommand tells the DataNode to invalidate local block(s),
* or to copy them to other DataNodes, etc.
* @param registration datanode registration information
- * @param capacity total storage capacity available at the datanode
- * @param dfsUsed storage used by HDFS
- * @param remaining remaining storage available for HDFS
- * @param blockPoolUsed storage used by the block pool
+ * @param reports utilization report per storage
* @param xmitsInProgress number of transfers from this datanode to others
* @param xceiverCount number of active transceiver threads
* @param failedVolumes number of failed volumes
* @throws IOException on error
*/
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
- long capacity,
- long dfsUsed, long remaining,
- long blockPoolUsed,
+ StorageReport[] reports,
int xmitsInProgress,
int xceiverCount,
int failedVolumes) throws IOException;
@@ -121,7 +118,7 @@ public interface DatanodeProtocol extend
* infrequently afterwards.
* @param registration
* @param poolId - the block pool ID for the blocks
- * @param blocks - the block list as an array of longs.
+ * @param reports - report of blocks per storage
* Each finalized block is represented as 3 longs. Each under-
* construction replica is represented as 4 longs.
* This is done instead of Block[] to reduce memory used by block reports.
@@ -130,8 +127,7 @@ public interface DatanodeProtocol extend
* @throws IOException
*/
public DatanodeCommand blockReport(DatanodeRegistration registration,
- String poolId,
- long[] blocks) throws IOException;
+ String poolId, StorageBlockReport[] reports) throws IOException;
/**
* blockReceivedAndDeleted() allows the DataNode to tell the NameNode about
@@ -145,7 +141,7 @@ public interface DatanodeProtocol extend
*/
public void blockReceivedAndDeleted(DatanodeRegistration registration,
String poolId,
- ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks)
+ StorageReceivedDeletedBlocks[] rcvdAndDeletedBlocks)
throws IOException;
/**
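
Taken together, the signature changes above move DatanodeProtocol to
per-storage reporting. A hedged sketch of adapting a legacy long[] block
list to the new blockReport form; the StorageBlockReport constructor below
is an assumption for illustration, since only getBlocks() appears in this
diff:

    // Hedged sketch: wrapping the old long[] wire form for the new
    // per-storage signature. The StorageBlockReport constructor is
    // hypothetical; only getBlocks() is visible in this patch.
    static DatanodeCommand reportBlocks(DatanodeProtocol namenode,
        DatanodeRegistration reg, String poolId,
        DatanodeStorage storage, long[] blocksAsLongs) throws IOException {
      StorageBlockReport[] reports = {
          new StorageBlockReport(storage, blocksAsLongs) // hypothetical ctor
      };
      return namenode.blockReport(reg, poolId, reports);
    }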
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Fri Feb 10 01:49:08 2012
@@ -25,9 +25,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeWireProtocol;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@@ -52,10 +50,11 @@ public interface InterDatanodeProtocol e
* serialization.
*
* If you are adding/changing DN's interface then you need to
- * change both this class and ALSO
- * {@link InterDatanodeWireProtocol}
- * These changes need to be done in a compatible fashion as described in
- * {@link ClientNamenodeWireProtocol}
+ * change both this class and ALSO the related protocol buffer
+ * wire protocol definition in InterDatanodeProtocol.proto.
+ *
+ * For more details on the protocol buffer wire protocol, please see
+ * .../org/apache/hadoop/hdfs/protocolPB/overview.html
*
* The log of historical changes can be retrieved from svn.
* 6: Add block pool ID to Block
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java Fri Feb 10 01:49:08 2012
@@ -21,8 +21,6 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
-import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@@ -41,10 +39,11 @@ public interface JournalProtocol extends
* to insulate from the protocol serialization.
*
* If you are adding/changing DN's interface then you need to
- * change both this class and ALSO
- * {@link JournalWireProtocol}.
- * These changes need to be done in a compatible fashion as described in
- * {@link ClientNamenodeWireProtocol}
+ * change both this class and ALSO the related protocol buffer
+ * wire protocol definition in JournalProtocol.proto.
+ *
+ * For more details on the protocol buffer wire protocol, please see
+ * .../org/apache/hadoop/hdfs/protocolPB/overview.html
*/
public static final long versionID = 1L;
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Fri Feb 10 01:49:08 2012
@@ -49,10 +49,11 @@ public interface NamenodeProtocol extend
* NN server side to insulate from the protocol serialization.
*
* If you are adding/changing NN's interface then you need to
- * change both this class and ALSO
- * {@link org.apache.hadoop.hdfs.protocolR23Compatible.NamenodeWireProtocol}.
- * These changes need to be done in a compatible fashion as described in
- * {@link org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol}
+ * change both this class and ALSO the related protocol buffer
+ * wire protocol definition in NamenodeProtocol.proto.
+ *
+ * For more details on the protocol buffer wire protocol, please see
+ * .../org/apache/hadoop/hdfs/protocolPB/overview.html
*
* 6: Switch to txid-based file naming for image and edits
*/
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Feb 10 01:49:08 2012
@@ -1127,7 +1127,7 @@ public class DFSAdmin extends FsShell {
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
throws IOException {
- InetSocketAddress datanodeAddr = DFSUtil.getSocketAddress(datanode);
+ InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
// Get the current configuration
Configuration conf = getConf();
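
The one-line change above standardizes on the common NetUtils helper for
parsing host:port strings. A minimal sketch of the call; the datanode
address below is illustrative only:

    // Sketch: NetUtils.createSocketAddr parses a "host:port" string into
    // an InetSocketAddress. The address is illustrative.
    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class AddrSketch {
      public static void main(String[] args) {
        InetSocketAddress addr =
            NetUtils.createSocketAddr("dn1.example.com:50020");
        System.out.println(addr.getHostName() + ":" + addr.getPort());
      }
    }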
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Fri Feb 10 01:49:08 2012
@@ -25,6 +25,7 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLConnection;
import java.security.PrivilegedExceptionAction;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -204,7 +206,8 @@ public class DelegationTokenFetcher {
static public Credentials getDTfromRemote(String nnAddr,
String renewer) throws IOException {
DataInputStream dis = null;
-
+ InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnAddr);
+
try {
StringBuffer url = new StringBuffer();
if (renewer != null) {
@@ -229,9 +232,7 @@ public class DelegationTokenFetcher {
ts.readFields(dis);
for(Token<?> token: ts.getAllTokens()) {
token.setKind(HftpFileSystem.TOKEN_KIND);
- token.setService(new Text(SecurityUtil.buildDTServiceName
- (remoteURL.toURI(),
- DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
+ SecurityUtil.setTokenService(token, serviceAddr);
}
return ts;
} catch (Exception e) {
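
The change above derives each token's service field once from the
NameNode address string rather than rebuilding it from the HTTPS URL. A
hedged sketch of the pattern, using only calls visible in this diff (the
method and its arguments are illustrative):

    // Sketch: stamp the service on every fetched token from a single
    // parsed NameNode address, as in the hunk above.
    static void stampService(Credentials creds, String nnAddr) {
      InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnAddr);
      for (Token<?> token : creds.getAllTokens()) {
        SecurityUtil.setTokenService(token, serviceAddr);
      }
    }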
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Fri Feb 10 01:49:08 2012
@@ -21,7 +21,7 @@ import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
-import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -50,6 +50,9 @@ import org.apache.hadoop.util.ToolRunner
* {@link GetConf.Command}.
*
* See {@link GetConf.Command#NAMENODE} for example.
+ *
+ * For each new option added, add a map entry with the corresponding
+ * {@link GetConf.CommandHandler}.
* </ul>
*/
public class GetConf extends Configured implements Tool {
@@ -57,31 +60,40 @@ public class GetConf extends Configured
+ "getting configuration information from the config file.\n";
enum Command {
- NAMENODE("-namenodes", new NameNodesCommandHandler(),
- "gets list of namenodes in the cluster."),
- SECONDARY("-secondaryNameNodes", new SecondaryNameNodesCommandHandler(),
+ NAMENODE("-namenodes", "gets list of namenodes in the cluster."),
+ SECONDARY("-secondaryNameNodes",
"gets list of secondary namenodes in the cluster."),
- BACKUP("-backupNodes", new BackupNodesCommandHandler(),
- "gets list of backup nodes in the cluster."),
+ BACKUP("-backupNodes", "gets list of backup nodes in the cluster."),
INCLUDE_FILE("-includeFile",
- new CommandHandler("DFSConfigKeys.DFS_HOSTS"),
"gets the include file path that defines the datanodes " +
"that can join the cluster."),
EXCLUDE_FILE("-excludeFile",
- new CommandHandler("DFSConfigKeys.DFS_HOSTS_EXCLUDE"),
"gets the exclude file path that defines the datanodes " +
"that need to decommissioned."),
- NNRPCADDRESSES("-nnRpcAddresses",
- new NNRpcAddressesCommandHandler(),
- "gets the namenode rpc addresses");
+ NNRPCADDRESSES("-nnRpcAddresses", "gets the namenode rpc addresses");
+ private static Map<String, CommandHandler> map;
+ static {
+ map = new HashMap<String, CommandHandler>();
+ map.put(NAMENODE.getName().toLowerCase(),
+ new NameNodesCommandHandler());
+ map.put(SECONDARY.getName().toLowerCase(),
+ new SecondaryNameNodesCommandHandler());
+ map.put(BACKUP.getName().toLowerCase(),
+ new BackupNodesCommandHandler());
+ map.put(INCLUDE_FILE.getName().toLowerCase(),
+ new CommandHandler("DFSConfigKeys.DFS_HOSTS"));
+ map.put(EXCLUDE_FILE.getName().toLowerCase(),
+ new CommandHandler("DFSConfigKeys.DFS_HOSTS_EXCLUDE"));
+ map.put(NNRPCADDRESSES.getName().toLowerCase(),
+ new NNRpcAddressesCommandHandler());
+ }
+
private final String cmd;
- private final CommandHandler handler;
private final String description;
- Command(String cmd, CommandHandler handler, String description) {
+ Command(String cmd, String description) {
this.cmd = cmd;
- this.handler = handler;
this.description = description;
}
@@ -94,12 +106,7 @@ public class GetConf extends Configured
}
public static CommandHandler getHandler(String name) {
- for (Command cmd : values()) {
- if (cmd.getName().equalsIgnoreCase(name)) {
- return cmd.handler;
- }
- }
- return null;
+ return map.get(name.toLowerCase());
}
}
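
The refactor above replaces per-constant handler fields with a single
static lookup table keyed on the lower-cased command name. A
self-contained sketch of the pattern with illustrative names (not the
actual GetConf types):

    // Illustrative sketch of map-based command dispatch: enum constants
    // stay declarative while a static map binds each lower-cased command
    // name to its handler; getHandler returns null for unknown names,
    // matching GetConf's behavior.
    import java.util.HashMap;
    import java.util.Map;

    class DispatchSketch {
      interface Handler { void run(); }

      enum Cmd {
        NAMENODES("-namenodes"), BACKUP_NODES("-backupNodes");

        private static final Map<String, Handler> HANDLERS =
            new HashMap<String, Handler>();
        static {
          HANDLERS.put(NAMENODES.cmd.toLowerCase(), new Handler() {
            public void run() { System.out.println("list namenodes"); }
          });
          HANDLERS.put(BACKUP_NODES.cmd.toLowerCase(), new Handler() {
            public void run() { System.out.println("list backup nodes"); }
          });
        }

        private final String cmd;
        Cmd(String cmd) { this.cmd = cmd; }

        static Handler getHandler(String name) {
          return HANDLERS.get(name.toLowerCase());
        }
      }
    }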
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Fri Feb 10 01:49:08 2012
@@ -141,6 +141,7 @@ public class WebHdfsFileSystem extends F
private final UserGroupInformation ugi;
private InetSocketAddress nnAddr;
+ private URI uri;
private Token<?> delegationToken;
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
private Path workingDir;
@@ -158,7 +159,11 @@ public class WebHdfsFileSystem extends F
) throws IOException {
super.initialize(uri, conf);
setConf(conf);
-
+ try {
+ this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
+ } catch (URISyntaxException e) {
+ throw new IllegalArgumentException(e);
+ }
this.nnAddr = NetUtils.createSocketAddr(uri.toString());
this.workingDir = getHomeDirectory();
@@ -203,12 +208,7 @@ public class WebHdfsFileSystem extends F
@Override
public URI getUri() {
- try {
- return new URI(SCHEME, null, nnAddr.getHostName(), nnAddr.getPort(),
- null, null, null);
- } catch (URISyntaxException e) {
- return null;
- }
+ return this.uri;
}
/** @return the home directory. */
@@ -810,8 +810,7 @@ public class WebHdfsFileSystem extends F
final Token<?> token, final Configuration conf
) throws IOException, InterruptedException, URISyntaxException {
- final InetSocketAddress nnAddr = NetUtils.createSocketAddr(
- token.getService().toString());
+ final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
return (WebHdfsFileSystem)FileSystem.get(uri, conf);
}
@@ -821,7 +820,7 @@ public class WebHdfsFileSystem extends F
) throws IOException, InterruptedException {
final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
// update the kerberos credentials, if they are coming from a keytab
- ugi.checkTGTAndReloginFromKeytab();
+ ugi.reloginFromKeytab();
try {
WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
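
The initialize() hunk above caches a normalized URI up front, so getUri()
no longer rebuilds it on every call (nor silently returns null on a
URISyntaxException). A minimal sketch of the normalization: keeping only
scheme and authority drops any path, query, or fragment; the host and
port are illustrative:

    // Sketch of the scheme-plus-authority normalization used above.
    import java.net.URI;
    import java.net.URISyntaxException;

    public class UriSketch {
      public static void main(String[] args) throws URISyntaxException {
        URI full = new URI("webhdfs://nn.example.com:50070/user/x?op=OPEN");
        URI base = new URI(full.getScheme(), full.getAuthority(),
            null, null, null);
        System.out.println(base);  // webhdfs://nn.example.com:50070
      }
    }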
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1237154
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:1152502-1242632
/hadoop/core/branches/branch-0.19/hdfs/src/main/native:713112
/hadoop/core/branches/branch-0.19/mapred/src/c++/libhdfs:713112
/hadoop/core/trunk/src/c++/libhdfs:776175-784663