Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2014/08/01 22:41:06 UTC
svn commit: r1615223 [1/2] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/server/datanode/
src/main/java/org/apache...
Author: cmccabe
Date: Fri Aug 1 20:41:05 2014
New Revision: 1615223
URL: http://svn.apache.org/r1615223
Log:
HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via Colin Patrick McCabe)
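The core of this change: a finalized block's on-disk directory becomes a pure function of its block ID, replacing the LDir bookkeeping (and the dfs.datanode.numblocks setting) removed below. For reference, a minimal standalone sketch of the mapping, assuming DataStorage.BLOCK_SUBDIR_PREFIX is "subdir" as in this tree:

import java.io.File;

public class IdLayoutDemo {
  // Mirrors DatanodeUtil.idToBlockDir from this commit; "subdir" is the
  // assumed value of DataStorage.BLOCK_SUBDIR_PREFIX.
  static File idToBlockDir(File root, long blockId) {
    int d1 = (int) ((blockId >> 16) & 0xff); // bits 16-23: first level, 256 dirs
    int d2 = (int) ((blockId >> 8) & 0xff);  // bits 8-15: second level, 256 dirs
    return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
  }

  public static void main(String[] args) {
    // Block ID 0x12345678 lands in finalized/subdir52/subdir86.
    System.out.println(idToBlockDir(new File("finalized"), 0x12345678L));
  }
}

Two levels of 256 directories keep any single directory small without tracking per-directory block counts.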
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz (with props)
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt
Removed:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Aug 1 20:41:05 2014
@@ -130,6 +130,9 @@ Trunk (Unreleased)
HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable
directory. (Jing Zhao via wheat9)
+ HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via
+ Colin Patrick McCabe)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Aug 1 20:41:05 2014
@@ -381,8 +381,6 @@ public class DFSConfigKeys extends Commo
public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;
public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
- public static final String DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks";
- public static final int DFS_DATANODE_NUMBLOCKS_DEFAULT = 64;
public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
@@ -666,4 +664,8 @@ public class DFSConfigKeys extends Commo
public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
"dfs.datanode.slow.io.warning.threshold.ms";
public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300;
+
+ public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY =
+ "dfs.datanode.block.id.layout.upgrade.threads";
+ public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java Fri Aug 1 20:41:05 2014
@@ -50,6 +50,9 @@ public class Block implements Writable,
public static final Pattern metaFilePattern = Pattern
.compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION
+ "$");
+ public static final Pattern metaOrBlockFilePattern = Pattern
+ .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION
+ + ")?$");
public static boolean isBlockFilename(File f) {
String name = f.getName();
@@ -65,6 +68,11 @@ public class Block implements Writable,
return metaFilePattern.matcher(name).matches();
}
+ public static File metaToBlockFile(File metaFile) {
+ return new File(metaFile.getParent(), metaFile.getName().substring(
+ 0, metaFile.getName().lastIndexOf('_')));
+ }
+
/**
* Get generation stamp from the name of the metafile name
*/
@@ -75,10 +83,10 @@ public class Block implements Writable,
}
/**
- * Get the blockId from the name of the metafile name
+ * Get the blockId from the name of the meta or block file
*/
- public static long getBlockId(String metaFile) {
- Matcher m = metaFilePattern.matcher(metaFile);
+ public static long getBlockId(String metaOrBlockFile) {
+ Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
return m.matches() ? Long.parseLong(m.group(1)) : 0;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Fri Aug 1 20:41:05 2014
@@ -152,7 +152,7 @@ public class BlockPoolSliceStorage exten
// During startup some of them can upgrade or roll back
// while others could be up-to-date for the regular startup.
for (int idx = 0; idx < getNumStorageDirs(); idx++) {
- doTransition(getStorageDir(idx), nsInfo, startOpt);
+ doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
assert getCTime() == nsInfo.getCTime()
: "Data-node and name-node CTimes must be the same.";
}
@@ -242,7 +242,7 @@ public class BlockPoolSliceStorage exten
* @param startOpt startup option
* @throws IOException
*/
- private void doTransition(StorageDirectory sd,
+ private void doTransition(DataNode datanode, StorageDirectory sd,
NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
if (startOpt == StartupOption.ROLLBACK) {
doRollback(sd, nsInfo); // rollback if applicable
@@ -275,7 +275,7 @@ public class BlockPoolSliceStorage exten
}
if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
- doUpgrade(sd, nsInfo); // upgrade
+ doUpgrade(datanode, sd, nsInfo); // upgrade
return;
}
// layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
@@ -304,7 +304,8 @@ public class BlockPoolSliceStorage exten
* @param nsInfo Namespace Info from the namenode
* @throws IOException on error
*/
- void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
+ void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo)
+ throws IOException {
// Upgrading is applicable only to releases with federation or later
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
@@ -340,7 +341,7 @@ public class BlockPoolSliceStorage exten
rename(bpCurDir, bpTmpDir);
// 3. Create new <SD>/current with block files hardlinks and VERSION
- linkAllBlocks(bpTmpDir, bpCurDir);
+ linkAllBlocks(datanode, bpTmpDir, bpCurDir);
this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
assert this.namespaceID == nsInfo.getNamespaceID()
: "Data-node and name-node layout versions must be the same.";
@@ -517,14 +518,15 @@ public class BlockPoolSliceStorage exten
* @param toDir the current data directory
* @throws IOException if error occurs during hardlink
*/
- private void linkAllBlocks(File fromDir, File toDir) throws IOException {
+ private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
+ throws IOException {
// do the link
int diskLayoutVersion = this.getLayoutVersion();
// hardlink finalized blocks in tmpDir
HardLink hardLink = new HardLink();
- DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
+ DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
- DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW),
+ DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
LOG.info( hardLink.linkStats.report() );
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java Fri Aug 1 20:41:05 2014
@@ -62,7 +62,10 @@ public class DataNodeLayoutVersion {
* </ul>
*/
public static enum Feature implements LayoutFeature {
- FIRST_LAYOUT(-55, -53, "First datanode layout", false);
+ FIRST_LAYOUT(-55, -53, "First datanode layout", false),
+ BLOCKID_BASED_LAYOUT(-56,
+ "The block ID of a finalized block uniquely determines its position " +
+ "in the directory structure");
private final FeatureInfo info;
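Datanode layout versions are negative and decrease as features are added (FIRST_LAYOUT is -55, BLOCKID_BASED_LAYOUT is -56), so "older than the ID-based layout" means numerically greater than -56. A tiny sketch of the comparison that DataStorage.linkBlocks performs below:

public class LayoutCheckDemo {
  static final int BLOCKID_BASED_LAYOUT = -56; // from the enum above

  static boolean needsIdBasedUpgrade(int oldLV) {
    return oldLV > BLOCKID_BASED_LAYOUT; // e.g. -55 (FIRST_LAYOUT) qualifies
  }

  public static void main(String[] args) {
    System.out.println(needsIdBasedUpgrade(-55)); // true: pre-HDFS-6482 layout
    System.out.println(needsIdBasedUpgrade(-56)); // false: already ID-based
  }
}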
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Aug 1 20:41:05 2014
@@ -18,13 +18,19 @@
package org.apache.hadoop.hdfs.server.datanode;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Futures;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.HardLink;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -35,13 +41,30 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DiskChecker;
-import java.io.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
/**
* Data storage information file.
@@ -261,6 +284,7 @@ public class DataStorage extends Storage
STORAGE_DIR_CURRENT));
bpDataDirs.add(bpRoot);
}
+
// mkdir for the list of BlockPoolStorage
makeBlockPoolDataDir(bpDataDirs, null);
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(
@@ -488,7 +512,7 @@ public class DataStorage extends Storage
// do upgrade
if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
- doUpgrade(sd, nsInfo); // upgrade
+ doUpgrade(datanode, sd, nsInfo); // upgrade
return;
}
@@ -523,7 +547,8 @@ public class DataStorage extends Storage
* @param sd storage directory
* @throws IOException on error
*/
- void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
+ void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo)
+ throws IOException {
// If the existing on-disk layout version supports federation, simply
// update its layout version.
if (DataNodeLayoutVersion.supports(
@@ -568,7 +593,8 @@ public class DataStorage extends Storage
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(),
nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
bpStorage.format(curDir, nsInfo);
- linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));
+ linkAllBlocks(datanode, tmpDir, bbwDir, new File(curBpDir,
+ STORAGE_DIR_CURRENT));
// 4. Write version file under <SD>/current
layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
@@ -746,22 +772,22 @@ public class DataStorage extends Storage
*
* @throws IOException If error occurs during hardlink
*/
- private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir)
- throws IOException {
+ private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
+ File toDir) throws IOException {
HardLink hardLink = new HardLink();
// do the link
int diskLayoutVersion = this.getLayoutVersion();
if (DataNodeLayoutVersion.supports(
LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
// hardlink finalized blocks in tmpDir/finalized
- linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED),
+ linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
// hardlink rbw blocks in tmpDir/rbw
- linkBlocks(new File(fromDir, STORAGE_DIR_RBW),
+ linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
} else { // pre-RBW version
// hardlink finalized blocks in tmpDir
- linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
+ linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
diskLayoutVersion, hardLink);
if (fromBbwDir.exists()) {
/*
@@ -770,15 +796,67 @@ public class DataStorage extends Storage
* NOT underneath the 'current' directory in those releases. See
* HDFS-3731 for details.
*/
- linkBlocks(fromBbwDir,
+ linkBlocks(datanode, fromBbwDir,
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
}
}
LOG.info( hardLink.linkStats.report() );
}
+
+ private static class LinkArgs {
+ public File src;
+ public File dst;
+
+ public LinkArgs(File src, File dst) {
+ this.src = src;
+ this.dst = dst;
+ }
+ }
+
+ static void linkBlocks(DataNode datanode, File from, File to, int oldLV,
+ HardLink hl) throws IOException {
+ boolean upgradeToIdBasedLayout = false;
+ // If we are upgrading from a version older than the one where we introduced
+ // block ID-based layout AND we're working with the finalized directory,
+ // we'll need to upgrade from the old flat layout to the block ID-based one
+ if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT.getInfo().
+ getLayoutVersion() && to.getName().equals(STORAGE_DIR_FINALIZED)) {
+ upgradeToIdBasedLayout = true;
+ }
+
+ final List<LinkArgs> idBasedLayoutSingleLinks = Lists.newArrayList();
+ linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to,
+ idBasedLayoutSingleLinks);
+ int numLinkWorkers = datanode.getConf().getInt(
+ DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY,
+ DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS);
+ ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers);
+ final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1;
+ List<Future<Void>> futures = Lists.newArrayList();
+ for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) {
+ final int iCopy = i;
+ futures.add(linkWorkers.submit(new Callable<Void>() {
+ @Override
+ public Void call() throws IOException {
+ int upperBound = Math.min(iCopy + step,
+ idBasedLayoutSingleLinks.size());
+ for (int j = iCopy; j < upperBound; j++) {
+ LinkArgs cur = idBasedLayoutSingleLinks.get(j);
+ NativeIO.link(cur.src, cur.dst);
+ }
+ return null;
+ }
+ }));
+ }
+ linkWorkers.shutdown();
+ for (Future<Void> f : futures) {
+ Futures.get(f, IOException.class);
+ }
+ }
- static void linkBlocks(File from, File to, int oldLV, HardLink hl)
- throws IOException {
+ static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl,
+ boolean upgradeToIdBasedLayout, File blockRoot,
+ List<LinkArgs> idBasedLayoutSingleLinks) throws IOException {
if (!from.exists()) {
return;
}
@@ -805,9 +883,6 @@ public class DataStorage extends Storage
// from is a directory
hl.linkStats.countDirs++;
- if (!to.mkdirs())
- throw new IOException("Cannot create directory " + to);
-
String[] blockNames = from.list(new java.io.FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
@@ -815,12 +890,36 @@ public class DataStorage extends Storage
}
});
+ // If we are upgrading to block ID-based layout, we don't want to recreate
+ // any subdirs from the source that contain blocks, since we have a new
+ // directory structure
+ if (!upgradeToIdBasedLayout || !to.getName().startsWith(
+ BLOCK_SUBDIR_PREFIX)) {
+ if (!to.mkdirs())
+ throw new IOException("Cannot create directory " + to);
+ }
+
// Block files just need hard links with the same file names
// but a different directory
if (blockNames.length > 0) {
- HardLink.createHardLinkMult(from, blockNames, to);
- hl.linkStats.countMultLinks++;
- hl.linkStats.countFilesMultLinks += blockNames.length;
+ if (upgradeToIdBasedLayout) {
+ for (String blockName : blockNames) {
+ long blockId = Block.getBlockId(blockName);
+ File blockLocation = DatanodeUtil.idToBlockDir(blockRoot, blockId);
+ if (!blockLocation.exists()) {
+ if (!blockLocation.mkdirs()) {
+ throw new IOException("Failed to mkdirs " + blockLocation);
+ }
+ }
+ idBasedLayoutSingleLinks.add(new LinkArgs(new File(from, blockName),
+ new File(blockLocation, blockName)));
+ hl.linkStats.countSingleLinks++;
+ }
+ } else {
+ HardLink.createHardLinkMult(from, blockNames, to);
+ hl.linkStats.countMultLinks++;
+ hl.linkStats.countFilesMultLinks += blockNames.length;
+ }
} else {
hl.linkStats.countEmptyDirs++;
}
@@ -834,8 +933,9 @@ public class DataStorage extends Storage
}
});
for(int i = 0; i < otherNames.length; i++)
- linkBlocks(new File(from, otherNames[i]),
- new File(to, otherNames[i]), oldLV, hl);
+ linkBlocksHelper(new File(from, otherNames[i]),
+ new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
+ blockRoot, idBasedLayoutSingleLinks);
}
/**
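The single-link work accumulated in idBasedLayoutSingleLinks is fanned out across a fixed pool in contiguous chunks of step = n / numWorkers + 1 entries. A self-contained sketch of the same partitioning shape (all names here are illustrative, with a print standing in for NativeIO.link):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ChunkedWorkDemo {
  public static void main(String[] args) throws Exception {
    final List<Integer> work = new ArrayList<Integer>();
    for (int i = 0; i < 25; i++) work.add(i);

    int numWorkers = 4;
    final int step = work.size() / numWorkers + 1; // 25/4 + 1 = 7 per chunk
    ExecutorService pool = Executors.newFixedThreadPool(numWorkers);
    List<Future<Void>> futures = new ArrayList<Future<Void>>();
    for (int i = 0; i < work.size(); i += step) {
      final int start = i;
      futures.add(pool.submit(new Callable<Void>() {
        @Override
        public Void call() {
          int end = Math.min(start + step, work.size());
          for (int j = start; j < end; j++) {
            // stand-in for NativeIO.link(cur.src, cur.dst)
            System.out.println(Thread.currentThread().getName() + " -> " + work.get(j));
          }
          return null;
        }
      }));
    }
    pool.shutdown();
    for (Future<Void> f : futures) f.get(); // surface worker exceptions
  }
}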
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java Fri Aug 1 20:41:05 2014
@@ -30,6 +30,8 @@ public class DatanodeUtil {
public static final String DISK_ERROR = "Possible disk error: ";
+ private static final String SEP = System.getProperty("file.separator");
+
/** Get the cause of an I/O exception if caused by a possible disk error
* @param ioe an I/O exception
* @return cause if the I/O exception is caused by a possible disk error;
@@ -78,4 +80,38 @@ public class DatanodeUtil {
public static File getUnlinkTmpFile(File f) {
return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
}
+
+ /**
+ * Checks whether there are any files anywhere in the directory tree rooted
+ * at dir (directories don't count as files); dir must exist.
+ * @return true if there are no files
+ * @throws IOException if unable to list subdirectories
+ */
+ public static boolean dirNoFilesRecursive(File dir) throws IOException {
+ File[] contents = dir.listFiles();
+ if (contents == null) {
+ throw new IOException("Cannot list contents of " + dir);
+ }
+ for (File f : contents) {
+ if (!f.isDirectory() || (f.isDirectory() && !dirNoFilesRecursive(f))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Get the directory where a finalized block with this ID should be stored.
+ * Do not attempt to create the directory.
+ * @param root the root directory where finalized blocks are stored
+ * @param blockId the ID of the block
+ * @return the two-level subdirectory under root determined by the block ID
+ */
+ public static File idToBlockDir(File root, long blockId) {
+ int d1 = (int)((blockId >> 16) & 0xff);
+ int d2 = (int)((blockId >> 8) & 0xff);
+ String path = DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
+ DataStorage.BLOCK_SUBDIR_PREFIX + d2;
+ return new File(root, path);
+ }
}
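A short usage sketch for the two helpers added above, assuming the patched DatanodeUtil is on the classpath and using illustrative paths; dirNoFilesRecursive treats a tree containing only empty directories as file-free, which is how FsVolumeImpl later decides a finalized directory can be deleted:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;

public class DatanodeUtilDemo {
  public static void main(String[] args) throws IOException {
    File finalized = new File("/tmp/finalized");        // illustrative root
    new File(finalized, "subdir52/subdir86").mkdirs();  // empty layout dirs only

    // true: the tree holds directories but no files
    System.out.println(DatanodeUtil.dirNoFilesRecursive(finalized));
    // /tmp/finalized/subdir52/subdir86 for block ID 0x12345678
    System.out.println(DatanodeUtil.idToBlockDir(finalized, 0x12345678L));
  }
}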
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java Fri Aug 1 20:41:05 2014
@@ -54,10 +54,10 @@ abstract public class ReplicaInfo extend
private File baseDir;
/**
- * Ints representing the sub directory path from base dir to the directory
- * containing this replica.
+ * Whether or not this replica's parent directory includes subdirs, in which
+ * case we can generate them based on the replica's block ID
*/
- private int[] subDirs;
+ private boolean hasSubdirs;
private static final Map<String, File> internedBaseDirs = new HashMap<String, File>();
@@ -151,18 +151,8 @@ abstract public class ReplicaInfo extend
* @return the parent directory path where this replica is located
*/
File getDir() {
- if (subDirs == null) {
- return null;
- }
-
- StringBuilder sb = new StringBuilder();
- for (int i : subDirs) {
- sb.append(DataStorage.BLOCK_SUBDIR_PREFIX);
- sb.append(i);
- sb.append("/");
- }
- File ret = new File(baseDir, sb.toString());
- return ret;
+ return hasSubdirs ? DatanodeUtil.idToBlockDir(baseDir,
+ getBlockId()) : baseDir;
}
/**
@@ -175,54 +165,46 @@ abstract public class ReplicaInfo extend
private void setDirInternal(File dir) {
if (dir == null) {
- subDirs = null;
baseDir = null;
return;
}
- ReplicaDirInfo replicaDirInfo = parseSubDirs(dir);
- this.subDirs = replicaDirInfo.subDirs;
+ ReplicaDirInfo dirInfo = parseBaseDir(dir);
+ this.hasSubdirs = dirInfo.hasSubdirs;
synchronized (internedBaseDirs) {
- if (!internedBaseDirs.containsKey(replicaDirInfo.baseDirPath)) {
+ if (!internedBaseDirs.containsKey(dirInfo.baseDirPath)) {
// Create a new String path of this file and make a brand new File object
// to guarantee we drop the reference to the underlying char[] storage.
- File baseDir = new File(replicaDirInfo.baseDirPath);
- internedBaseDirs.put(replicaDirInfo.baseDirPath, baseDir);
+ File baseDir = new File(dirInfo.baseDirPath);
+ internedBaseDirs.put(dirInfo.baseDirPath, baseDir);
}
- this.baseDir = internedBaseDirs.get(replicaDirInfo.baseDirPath);
+ this.baseDir = internedBaseDirs.get(dirInfo.baseDirPath);
}
}
-
+
@VisibleForTesting
public static class ReplicaDirInfo {
- @VisibleForTesting
public String baseDirPath;
-
- @VisibleForTesting
- public int[] subDirs;
+ public boolean hasSubdirs;
+
+ public ReplicaDirInfo(String baseDirPath, boolean hasSubdirs) {
+ this.baseDirPath = baseDirPath;
+ this.hasSubdirs = hasSubdirs;
+ }
}
@VisibleForTesting
- public static ReplicaDirInfo parseSubDirs(File dir) {
- ReplicaDirInfo ret = new ReplicaDirInfo();
+ public static ReplicaDirInfo parseBaseDir(File dir) {
File currentDir = dir;
- List<Integer> subDirList = new ArrayList<Integer>();
+ boolean hasSubdirs = false;
while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) {
- // Prepend the integer into the list.
- subDirList.add(0, Integer.parseInt(currentDir.getName().replaceFirst(
- DataStorage.BLOCK_SUBDIR_PREFIX, "")));
+ hasSubdirs = true;
currentDir = currentDir.getParentFile();
}
- ret.subDirs = new int[subDirList.size()];
- for (int i = 0; i < subDirList.size(); i++) {
- ret.subDirs[i] = subDirList.get(i);
- }
-
- ret.baseDirPath = currentDir.getAbsolutePath();
- return ret;
+ return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs);
}
/**
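Since getDir() can now regenerate subdirectories from the block ID, parseBaseDir only needs to note whether any subdir components are present rather than record their indices. A standalone sketch of that walk (Unix-style illustrative path):

import java.io.File;

public class ParseBaseDirDemo {
  public static void main(String[] args) {
    File dir = new File("/data/current/finalized/subdir52/subdir86");
    boolean hasSubdirs = false;
    File cur = dir;
    while (cur.getName().startsWith("subdir")) { // DataStorage.BLOCK_SUBDIR_PREFIX
      hasSubdirs = true;
      cur = cur.getParentFile();                 // strip one subdir level
    }
    // prints: /data/current/finalized true
    System.out.println(cur.getAbsolutePath() + " " + hasSubdirs);
  }
}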
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java Fri Aug 1 20:41:05 2014
@@ -59,7 +59,8 @@ class BlockPoolSlice {
private final String bpid;
private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
private final File currentDir; // StorageDirectory/current/bpid/current
- private final LDir finalizedDir; // directory store Finalized replica
+ // directory where finalized replicas are stored
+ private final File finalizedDir;
private final File rbwDir; // directory store RBW replica
private final File tmpDir; // directory store Temporary replica
private static final String DU_CACHE_FILE = "dfsUsed";
@@ -82,8 +83,13 @@ class BlockPoolSlice {
this.bpid = bpid;
this.volume = volume;
this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
- final File finalizedDir = new File(
+ this.finalizedDir = new File(
currentDir, DataStorage.STORAGE_DIR_FINALIZED);
+ if (!this.finalizedDir.exists()) {
+ if (!this.finalizedDir.mkdirs()) {
+ throw new IOException("Failed to mkdirs " + this.finalizedDir);
+ }
+ }
// Files that were being written when the datanode was last shutdown
// are now moved back to the data directory. It is possible that
@@ -95,10 +101,6 @@ class BlockPoolSlice {
FileUtil.fullyDelete(tmpDir);
}
this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
- final int maxBlocksPerDir = conf.getInt(
- DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
- DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
- this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
if (!rbwDir.mkdirs()) { // create rbw directory if not exist
if (!rbwDir.isDirectory()) {
throw new IOException("Mkdirs failed to create " + rbwDir.toString());
@@ -131,7 +133,7 @@ class BlockPoolSlice {
}
File getFinalizedDir() {
- return finalizedDir.dir;
+ return finalizedDir;
}
File getRbwDir() {
@@ -239,26 +241,57 @@ class BlockPoolSlice {
}
File addBlock(Block b, File f) throws IOException {
- File blockFile = finalizedDir.addBlock(b, f);
+ File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
+ if (!blockDir.exists()) {
+ if (!blockDir.mkdirs()) {
+ throw new IOException("Failed to mkdirs " + blockDir);
+ }
+ }
+ File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
return blockFile;
}
void checkDirs() throws DiskErrorException {
- finalizedDir.checkDirTree();
+ DiskChecker.checkDirs(finalizedDir);
DiskChecker.checkDir(tmpDir);
DiskChecker.checkDir(rbwDir);
}
void getVolumeMap(ReplicaMap volumeMap) throws IOException {
// add finalized replicas
- finalizedDir.getVolumeMap(bpid, volumeMap, volume);
+ addToReplicasMap(volumeMap, finalizedDir, true);
// add rbw replicas
addToReplicasMap(volumeMap, rbwDir, false);
}
/**
+ * Recover an unlinked tmp file on datanode restart. If the original block
+ * does not exist, then the tmp file is renamed to be the
+ * original file name and the original name is returned; otherwise the tmp
+ * file is deleted and null is returned.
+ */
+ File recoverTempUnlinkedBlock(File unlinkedTmp) throws IOException {
+ File blockFile = FsDatasetUtil.getOrigFile(unlinkedTmp);
+ if (blockFile.exists()) {
+ // If the original block file still exists, then no recovery is needed.
+ if (!unlinkedTmp.delete()) {
+ throw new IOException("Unable to cleanup unlinked tmp file " +
+ unlinkedTmp);
+ }
+ return null;
+ } else {
+ if (!unlinkedTmp.renameTo(blockFile)) {
+ throw new IOException("Unable to rename unlinked tmp file " +
+ unlinkedTmp);
+ }
+ return blockFile;
+ }
+ }
+
+
+ /**
* Add replicas under the given directory to the volume map
* @param volumeMap the replicas map
* @param dir an input directory
@@ -267,23 +300,34 @@ class BlockPoolSlice {
*/
void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized
) throws IOException {
- File blockFiles[] = FileUtil.listFiles(dir);
- for (File blockFile : blockFiles) {
- if (!Block.isBlockFilename(blockFile))
+ File files[] = FileUtil.listFiles(dir);
+ for (File file : files) {
+ if (file.isDirectory()) {
+ addToReplicasMap(volumeMap, file, isFinalized);
+ }
+
+ if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) {
+ file = recoverTempUnlinkedBlock(file);
+ if (file == null) { // the original block still exists, so we cover it
+ // in another iteration and can continue here
+ continue;
+ }
+ }
+ if (!Block.isBlockFilename(file))
continue;
long genStamp = FsDatasetUtil.getGenerationStampFromFile(
- blockFiles, blockFile);
- long blockId = Block.filename2id(blockFile.getName());
+ files, file);
+ long blockId = Block.filename2id(file.getName());
ReplicaInfo newReplica = null;
if (isFinalized) {
newReplica = new FinalizedReplica(blockId,
- blockFile.length(), genStamp, volume, blockFile.getParentFile());
+ file.length(), genStamp, volume, file.getParentFile());
} else {
boolean loadRwr = true;
- File restartMeta = new File(blockFile.getParent() +
- File.pathSeparator + "." + blockFile.getName() + ".restart");
+ File restartMeta = new File(file.getParent() +
+ File.pathSeparator + "." + file.getName() + ".restart");
Scanner sc = null;
try {
sc = new Scanner(restartMeta);
@@ -291,8 +335,8 @@ class BlockPoolSlice {
if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
// It didn't expire. Load the replica as a RBW.
newReplica = new ReplicaBeingWritten(blockId,
- validateIntegrityAndSetLength(blockFile, genStamp),
- genStamp, volume, blockFile.getParentFile(), null);
+ validateIntegrityAndSetLength(file, genStamp),
+ genStamp, volume, file.getParentFile(), null);
loadRwr = false;
}
sc.close();
@@ -301,7 +345,7 @@ class BlockPoolSlice {
restartMeta.getPath());
}
} catch (FileNotFoundException fnfe) {
// nothing to do here
} finally {
if (sc != null) {
sc.close();
@@ -310,15 +354,15 @@ class BlockPoolSlice {
// Restart meta doesn't exist or expired.
if (loadRwr) {
newReplica = new ReplicaWaitingToBeRecovered(blockId,
- validateIntegrityAndSetLength(blockFile, genStamp),
- genStamp, volume, blockFile.getParentFile());
+ validateIntegrityAndSetLength(file, genStamp),
+ genStamp, volume, file.getParentFile());
}
}
ReplicaInfo oldReplica = volumeMap.add(bpid, newReplica);
if (oldReplica != null) {
FsDatasetImpl.LOG.warn("Two block files with the same block id exist " +
- "on disk: " + oldReplica.getBlockFile() + " and " + blockFile );
+ "on disk: " + oldReplica.getBlockFile() + " and " + file );
}
}
}
@@ -405,10 +449,6 @@ class BlockPoolSlice {
}
}
- void clearPath(File f) {
- finalizedDir.clearPath(f);
- }
-
@Override
public String toString() {
return currentDir.getAbsolutePath();
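Because finalized replicas now sit two subdir levels below the finalized root, the volume-map scan above recurses into directories instead of listing one flat directory. A minimal walker with the same shape, using illustrative names:

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class ReplicaScanDemo {
  static void collectBlockFiles(File dir, List<File> out) {
    File[] files = dir.listFiles();
    if (files == null) return; // unreadable or not a directory
    for (File f : files) {
      if (f.isDirectory()) {
        collectBlockFiles(f, out);       // descend into subdirNN/subdirMM
      } else if (f.getName().startsWith("blk_") &&
                 !f.getName().endsWith(".meta")) {
        out.add(f);                      // a block file, not its metadata
      }
    }
  }

  public static void main(String[] args) {
    List<File> blocks = new ArrayList<File>();
    collectBlockFiles(new File("/tmp/finalized"), blocks);
    System.out.println(blocks);
  }
}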
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Fri Aug 1 20:41:05 2014
@@ -1224,13 +1224,6 @@ class FsDatasetImpl implements FsDataset
+ ". Parent not found for file " + f);
continue;
}
- ReplicaState replicaState = info.getState();
- if (replicaState == ReplicaState.FINALIZED ||
- (replicaState == ReplicaState.RUR &&
- ((ReplicaUnderRecovery)info).getOriginalReplica().getState() ==
- ReplicaState.FINALIZED)) {
- v.clearPath(bpid, parent);
- }
volumeMap.remove(bpid, invalidBlks[i]);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java Fri Aug 1 20:41:05 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -235,10 +236,6 @@ class FsVolumeImpl implements FsVolumeSp
// dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
bp.addToReplicasMap(volumeMap, dir, isFinalized);
}
-
- void clearPath(String bpid, File f) throws IOException {
- getBlockPoolSlice(bpid).clearPath(f);
- }
@Override
public String toString() {
@@ -274,7 +271,8 @@ class FsVolumeImpl implements FsVolumeSp
File finalizedDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_FINALIZED);
File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
- if (finalizedDir.exists() && FileUtil.list(finalizedDir).length != 0) {
+ if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
+ finalizedDir)) {
return false;
}
if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
@@ -301,7 +299,8 @@ class FsVolumeImpl implements FsVolumeSp
if (!rbwDir.delete()) {
throw new IOException("Failed to delete " + rbwDir);
}
- if (!finalizedDir.delete()) {
+ if (!DatanodeUtil.dirNoFilesRecursive(finalizedDir) ||
+ !FileUtil.fullyDelete(finalizedDir)) {
throw new IOException("Failed to delete " + finalizedDir);
}
FileUtil.fullyDelete(tmpDir);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Fri Aug 1 20:41:05 2014
@@ -2052,4 +2052,12 @@
</description>
</property>
+<property>
+ <name>dfs.datanode.block.id.layout.upgrade.threads</name>
+ <value>12</value>
+ <description>The number of threads to use when creating hard links from
+ current to previous blocks during upgrade of a DataNode to block ID-based
+ block layout (see HDFS-6482 for details on the layout).</description>
+</property>
+
</configuration>
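The upgrade code reads this key with a plain Configuration.getInt, as in DataStorage.linkBlocks earlier in this commit. A minimal usage sketch, assuming hadoop-common is on the classpath:

import org.apache.hadoop.conf.Configuration;

public class UpgradeThreadsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("dfs.datanode.block.id.layout.upgrade.threads", 24); // override
    int numLinkWorkers = conf.getInt(
        "dfs.datanode.block.id.layout.upgrade.threads", 12); // 12 is the default
    System.out.println(numLinkWorkers); // 24
  }
}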
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Aug 1 20:41:05 2014
@@ -2353,8 +2353,8 @@ public class MiniDFSCluster {
* @return data file corresponding to the block
*/
public static File getBlockFile(File storageDir, ExtendedBlock blk) {
- return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()),
- blk.getBlockName());
+ return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+ blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName());
}
/**
@@ -2364,10 +2364,32 @@ public class MiniDFSCluster {
* @return metadata file corresponding to the block
*/
public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
- return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()),
- blk.getBlockName() + "_" + blk.getGenerationStamp() +
- Block.METADATA_EXTENSION);
-
+ return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+ blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" +
+ blk.getGenerationStamp() + Block.METADATA_EXTENSION);
+ }
+
+ /**
+ * Return all block metadata files in the given directory (recursive search).
+ */
+ public static List<File> getAllBlockMetadataFiles(File storageDir) {
+ List<File> results = new ArrayList<File>();
+ File[] files = storageDir.listFiles();
+ if (files == null) {
+ return null;
+ }
+ for (File f : files) {
+ if (f.getName().startsWith("blk_") && f.getName().endsWith(
+ Block.METADATA_EXTENSION)) {
+ results.add(f);
+ } else if (f.isDirectory()) {
+ List<File> subdirResults = getAllBlockMetadataFiles(f);
+ if (subdirResults != null) {
+ results.addAll(subdirResults);
+ }
+ }
+ }
+ return results;
}
/**
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Fri Aug 1 20:41:05 2014
@@ -79,8 +79,8 @@ public class TestDFSFinalize {
File dnCurDirs[] = new File[dataNodeDirs.length];
for (int i = 0; i < dataNodeDirs.length; i++) {
dnCurDirs[i] = new File(dataNodeDirs[i],"current");
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]),
- UpgradeUtilities.checksumMasterDataNodeContents());
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
+ false), UpgradeUtilities.checksumMasterDataNodeContents());
}
for (int i = 0; i < nameNodeDirs.length; i++) {
assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
@@ -96,8 +96,9 @@ public class TestDFSFinalize {
assertFalse(new File(bpRoot,"previous").isDirectory());
File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir),
- UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+ bpCurFinalizeDir, true),
+ UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
}
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Fri Aug 1 20:41:05 2014
@@ -81,7 +81,7 @@ public class TestDFSRollback {
break;
case DATA_NODE:
assertEquals(
- UpgradeUtilities.checksumContents(nodeType, curDir),
+ UpgradeUtilities.checksumContents(nodeType, curDir, false),
UpgradeUtilities.checksumMasterDataNodeContents());
break;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java Fri Aug 1 20:41:05 2014
@@ -239,7 +239,7 @@ public class TestDFSStorageStateRecovery
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
UpgradeUtilities.checksumContents(
- NAME_NODE, new File(baseDirs[i],"previous")),
+ NAME_NODE, new File(baseDirs[i],"previous"), false),
UpgradeUtilities.checksumMasterNameNodeContents());
}
}
@@ -259,7 +259,8 @@ public class TestDFSStorageStateRecovery
if (currentShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
assertEquals(
- UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"current")),
+ UpgradeUtilities.checksumContents(DATA_NODE,
+ new File(baseDirs[i],"current"), false),
UpgradeUtilities.checksumMasterDataNodeContents());
}
}
@@ -267,7 +268,8 @@ public class TestDFSStorageStateRecovery
for (int i = 0; i < baseDirs.length; i++) {
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
- UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"previous")),
+ UpgradeUtilities.checksumContents(DATA_NODE,
+ new File(baseDirs[i],"previous"), false),
UpgradeUtilities.checksumMasterDataNodeContents());
}
}
@@ -290,8 +292,8 @@ public class TestDFSStorageStateRecovery
if (currentShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir),
- UpgradeUtilities.checksumMasterBlockPoolContents());
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
+ false), UpgradeUtilities.checksumMasterBlockPoolContents());
}
}
if (previousShouldExist) {
@@ -299,8 +301,8 @@ public class TestDFSStorageStateRecovery
File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
assertTrue(bpPrevDir.isDirectory());
assertEquals(
- UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir),
- UpgradeUtilities.checksumMasterBlockPoolContents());
+ UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
+ false), UpgradeUtilities.checksumMasterBlockPoolContents());
}
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java Fri Aug 1 20:41:05 2014
@@ -100,7 +100,7 @@ public class TestDFSUpgrade {
File previous = new File(baseDir, "previous");
assertExists(previous);
- assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous),
+ assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false),
UpgradeUtilities.checksumMasterNameNodeContents());
}
}
@@ -114,23 +114,25 @@ public class TestDFSUpgrade {
void checkDataNode(String[] baseDirs, String bpid) throws IOException {
for (int i = 0; i < baseDirs.length; i++) {
File current = new File(baseDirs[i], "current/" + bpid + "/current");
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false),
UpgradeUtilities.checksumMasterDataNodeContents());
// block files are placed under <sd>/current/<bpid>/current/finalized
File currentFinalized =
MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid);
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, currentFinalized),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+ currentFinalized, true),
UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
File previous = new File(baseDirs[i], "current/" + bpid + "/previous");
assertTrue(previous.isDirectory());
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false),
UpgradeUtilities.checksumMasterDataNodeContents());
File previousFinalized =
new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized");
- assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previousFinalized),
+ assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+ previousFinalized, true),
UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Fri Aug 1 20:41:05 2014
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.File;
+import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
@@ -80,7 +81,7 @@ public class TestDFSUpgradeFromImage {
long checksum;
}
- private static final Configuration upgradeConf;
+ static final Configuration upgradeConf;
static {
upgradeConf = new HdfsConfiguration();
@@ -95,7 +96,7 @@ public class TestDFSUpgradeFromImage {
boolean printChecksum = false;
- private void unpackStorage(String tarFileName)
+ void unpackStorage(String tarFileName, String referenceName)
throws IOException {
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + tarFileName;
@@ -110,7 +111,7 @@ public class TestDFSUpgradeFromImage {
BufferedReader reader = new BufferedReader(new FileReader(
System.getProperty("test.cache.data", "build/test/cache")
- + "/" + HADOOP_DFS_DIR_TXT));
+ + "/" + referenceName));
String line;
while ( (line = reader.readLine()) != null ) {
@@ -285,7 +286,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromRel22Image() throws IOException {
- unpackStorage(HADOOP22_IMAGE);
+ unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
numDataNodes(4));
}
@@ -296,7 +297,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromCorruptRel22Image() throws IOException {
- unpackStorage(HADOOP22_IMAGE);
+ unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
// Overwrite the md5 stored in the VERSION files
File baseDir = new File(MiniDFSCluster.getBaseDirectory());
@@ -333,7 +334,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromRel1ReservedImage() throws Exception {
- unpackStorage(HADOOP1_RESERVED_IMAGE);
+ unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
final Configuration conf = new Configuration();
@@ -403,7 +404,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromRel023ReservedImage() throws Exception {
- unpackStorage(HADOOP023_RESERVED_IMAGE);
+ unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
final Configuration conf = new Configuration();
@@ -468,7 +469,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromRel2ReservedImage() throws Exception {
- unpackStorage(HADOOP2_RESERVED_IMAGE);
+ unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
final Configuration conf = new Configuration();
@@ -572,7 +573,7 @@ public class TestDFSUpgradeFromImage {
} while (dirList.hasMore());
}
- private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+ void upgradeAndVerify(MiniDFSCluster.Builder bld)
throws IOException {
MiniDFSCluster cluster = null;
try {
@@ -601,7 +602,7 @@ public class TestDFSUpgradeFromImage {
*/
@Test
public void testUpgradeFromRel1BBWImage() throws IOException {
- unpackStorage(HADOOP1_BBW_IMAGE);
+ unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT);
Configuration conf = new Configuration(upgradeConf);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
System.getProperty("test.build.data") + File.separator +
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Fri Aug 1 20:41:05 2014
@@ -445,19 +445,14 @@ public class TestDatanodeBlockScanner {
@Test
public void testReplicaInfoParsing() throws Exception {
- testReplicaInfoParsingSingle(BASE_PATH, new int[0]);
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1", new int[]{1});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir43", new int[]{43});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3", new int[]{1, 2, 3});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir43", new int[]{1, 2, 43});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir23/subdir3", new int[]{1, 23, 3});
- testReplicaInfoParsingSingle(BASE_PATH + "/subdir13/subdir2/subdir3", new int[]{13, 2, 3});
+ testReplicaInfoParsingSingle(BASE_PATH);
+ testReplicaInfoParsingSingle(BASE_PATH + "/subdir1");
+ testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3");
}
- private static void testReplicaInfoParsingSingle(String subDirPath, int[] expectedSubDirs) {
+ private static void testReplicaInfoParsingSingle(String subDirPath) {
File testFile = new File(subDirPath);
- assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs);
- assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath);
+ assertEquals(BASE_PATH, ReplicaInfo.parseBaseDir(testFile).baseDirPath);
}
@Test
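Under the new block ID-based layout the subdirNN components are a function of the block ID, so the parser no longer needs to return them; it only recovers the base directory. A minimal sketch of that recovery, consistent with the test paths above (the real ReplicaInfo.parseBaseDir may differ in details):

import java.io.File;

// Hypothetical stand-in for ReplicaInfo.parseBaseDir: strip any
// trailing subdirNN components to recover the base directory path.
static String parseBaseDirPath(File dir) {
  File cur = dir;
  while (cur != null && cur.getName().startsWith("subdir")) {
    cur = cur.getParentFile();
  }
  return cur == null ? null : cur.getPath();
}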
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java?rev=1615223&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java Fri Aug 1 20:41:05 2014
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+
+public class TestDatanodeLayoutUpgrade {
+ private static final String HADOOP_DATANODE_DIR_TXT =
+ "hadoop-datanode-dir.txt";
+ private static final String HADOOP24_DATANODE = "hadoop-24-datanode-dir.tgz";
+
+ @Test
+ // Upgrade from LDir-based layout to block ID-based layout -- change described
+ // in HDFS-6482
+ public void testUpgradeToIdBasedLayout() throws IOException {
+ TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
+ upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
+ Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
+ conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+ System.getProperty("test.build.data") + File.separator +
+ "dfs" + File.separator + "data");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ System.getProperty("test.build.data") + File.separator +
+ "dfs" + File.separator + "name");
+ upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .manageDataDfsDirs(false).manageNameDfsDirs(false));
+ }
+}
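This test exercises the heart of HDFS-6482: after the upgrade, a finalized replica's directory is computed from its block ID instead of being assigned by an LDir. A minimal sketch of such a mapping with a two-level fan-out; the directory naming and bit widths here are assumptions, not necessarily what DatanodeUtil commits to:

import java.io.File;

// Hypothetical ID-based layout: two nested directory levels, each
// taken from a byte of the block ID, under the finalized directory.
static File idToBlockDir(File finalizedRoot, long blockId) {
  int d1 = (int) ((blockId >> 16) & 0xff);
  int d2 = (int) ((blockId >> 8) & 0xff);
  return new File(finalizedRoot,
      "subdir" + d1 + File.separator + "subdir" + d2);
}

Because the path is a pure function of the block ID, a replica can be located without scanning, and the upgrade can hard-link each old block file into its computed directory.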
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Fri Aug 1 20:41:05 2014
@@ -27,6 +27,7 @@ import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.util.ArrayList;
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -35,6 +36,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -137,13 +139,15 @@ public class TestFileCorruption {
final String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = cluster.getInstanceStorageDir(0, 0);
File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+ assertTrue("Data directory does not exist", dataDir.exists());
ExtendedBlock blk = getBlock(bpid, dataDir);
if (blk == null) {
storageDir = cluster.getInstanceStorageDir(0, 1);
dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
blk = getBlock(bpid, dataDir);
}
- assertFalse(blk==null);
+ assertFalse("Data directory does not contain any blocks or there was an "
+ + "IO error", blk==null);
// start a third datanode
cluster.startDataNodes(conf, 1, true, null, null);
@@ -174,33 +178,15 @@ public class TestFileCorruption {
}
- private ExtendedBlock getBlock(String bpid, File dataDir) {
- assertTrue("data directory does not exist", dataDir.exists());
- File[] blocks = dataDir.listFiles();
- assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));
-
- int idx = 0;
- String blockFileName = null;
- for (; idx < blocks.length; idx++) {
- blockFileName = blocks[idx].getName();
- if (blockFileName.startsWith("blk_") && !blockFileName.endsWith(".meta")) {
- break;
- }
- }
- if (blockFileName == null) {
+ public static ExtendedBlock getBlock(String bpid, File dataDir) {
+ List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
+ if (metadataFiles == null || metadataFiles.isEmpty()) {
return null;
}
- long blockId = Long.parseLong(blockFileName.substring("blk_".length()));
- long blockTimeStamp = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
- for (idx=0; idx < blocks.length; idx++) {
- String fileName = blocks[idx].getName();
- if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) {
- int startIndex = blockFileName.length()+1;
- int endIndex = fileName.length() - ".meta".length();
- blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex));
- break;
- }
- }
- return new ExtendedBlock(bpid, blockId, blocks[idx].length(), blockTimeStamp);
+ File metadataFile = metadataFiles.get(0);
+ File blockFile = Block.metaToBlockFile(metadataFile);
+ return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
+ blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
}
+
}
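The rewritten getBlock delegates filename arithmetic to helpers on Block. A minimal sketch of what those helpers do, assuming the long-standing meta file naming scheme blk_<blockId>_<genStamp>.meta (the Block class itself is outside this diff):

import java.io.File;

// Meta files are named blk_<blockId>_<genStamp>.meta; the matching
// block file is blk_<blockId> in the same directory.
static File metaToBlockFile(File metaFile) {
  String name = metaFile.getName();
  String base = name.substring(0, name.lastIndexOf('_')); // blk_<id>
  return new File(metaFile.getParentFile(), base);
}

static long getBlockId(String blockFileName) {
  return Long.parseLong(blockFileName.substring("blk_".length()));
}

static long getGenerationStamp(String metaFileName) {
  int from = metaFileName.lastIndexOf('_') + 1;
  int to = metaFileName.length() - ".meta".length();
  return Long.parseLong(metaFileName.substring(from, to));
}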
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Fri Aug 1 20:41:05 2014
@@ -158,21 +158,23 @@ public class UpgradeUtilities {
FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock"));
}
namenodeStorageChecksum = checksumContents(NAME_NODE,
- new File(namenodeStorage, "current"));
+ new File(namenodeStorage, "current"), false);
File dnCurDir = new File(datanodeStorage, "current");
- datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir);
+ datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);
File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
"current");
- blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir);
+ blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);
File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
"current/"+DataStorage.STORAGE_DIR_FINALIZED);
- blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, bpCurFinalizeDir);
+ blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE,
+ bpCurFinalizeDir, true);
File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
"current/"+DataStorage.STORAGE_DIR_RBW);
- blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir);
+ blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir,
+ false);
}
// Private helper method that writes a file to the given file system.
@@ -266,36 +268,47 @@ public class UpgradeUtilities {
/**
* Compute the checksum of all the files in the specified directory.
- * The contents of subdirectories are not included. This method provides
- * an easy way to ensure equality between the contents of two directories.
+ * This method provides an easy way to ensure equality between the contents
+ * of two directories.
*
* @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
* This is because this file is changed every time
* the Datanode is started.
- * @param dir must be a directory. Subdirectories are ignored.
+ * @param dir must be a directory
+ * @param recursive whether or not to consider subdirectories
*
* @throws IllegalArgumentException if specified directory is not a directory
* @throws IOException if an IOException occurs while reading the files
* @return the computed checksum value
*/
- public static long checksumContents(NodeType nodeType, File dir) throws IOException {
+ public static long checksumContents(NodeType nodeType, File dir,
+ boolean recursive) throws IOException {
+ CRC32 checksum = new CRC32();
+ checksumContentsHelper(nodeType, dir, checksum, recursive);
+ return checksum.getValue();
+ }
+
+ public static void checksumContentsHelper(NodeType nodeType, File dir,
+ CRC32 checksum, boolean recursive) throws IOException {
if (!dir.isDirectory()) {
throw new IllegalArgumentException(
- "Given argument is not a directory:" + dir);
+ "Given argument is not a directory:" + dir);
}
File[] list = dir.listFiles();
Arrays.sort(list);
- CRC32 checksum = new CRC32();
for (int i = 0; i < list.length; i++) {
if (!list[i].isFile()) {
+ if (recursive) {
+ checksumContentsHelper(nodeType, list[i], checksum, recursive);
+ }
continue;
}
// skip VERSION and dfsUsed file for DataNodes
- if (nodeType == DATA_NODE &&
- (list[i].getName().equals("VERSION") ||
- list[i].getName().equals("dfsUsed"))) {
- continue;
+ if (nodeType == DATA_NODE &&
+ (list[i].getName().equals("VERSION") ||
+ list[i].getName().equals("dfsUsed"))) {
+ continue;
}
FileInputStream fis = null;
@@ -312,7 +325,6 @@ public class UpgradeUtilities {
}
}
}
- return checksum.getValue();
}
/**
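With replicas nested in subdirNN directories, the finalized tree must be checksummed recursively, while flat directories such as rbw keep the old single-level behavior. A short usage sketch mirroring the calls above (bpRoot stands for a block pool root directory):

// Flat directory: only top-level files contribute, as before.
long rbwChecksum = UpgradeUtilities.checksumContents(
    DATA_NODE, new File(bpRoot, "current/rbw"), false);

// Finalized tree: descend into subdirNN directories as well.
long finalizedChecksum = UpgradeUtilities.checksumContents(
    DATA_NODE, new File(bpRoot, "current/finalized"), true);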
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Aug 1 20:41:05 2014
@@ -25,6 +25,7 @@ import java.io.FilenameFilter;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -384,7 +385,7 @@ public class TestDataNodeVolumeFailure {
continue;
}
- String [] res = metaFilesInDir(dir);
+ List<File> res = MiniDFSCluster.getAllBlockMetadataFiles(dir);
if(res == null) {
System.out.println("res is null for dir = " + dir + " i=" + i + " and j=" + j);
continue;
@@ -392,7 +393,8 @@ public class TestDataNodeVolumeFailure {
//System.out.println("for dn" + i + "." + j + ": " + dir + "=" + res.length+ " files");
//int ii = 0;
- for(String s: res) {
+ for(File f: res) {
+ String s = f.getName();
// cut off "blk_-" at the beginning and ".meta" at the end
assertNotNull("Block file name should not be null", s);
String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_"));
@@ -408,25 +410,9 @@ public class TestDataNodeVolumeFailure {
//System.out.println("dir1="+dir.getPath() + "blocks=" + res.length);
//System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length);
- total += res.length;
+ total += res.size();
}
}
return total;
}
-
- /*
- * count how many files *.meta are in the dir
- */
- private String [] metaFilesInDir(File dir) {
- String [] res = dir.list(
- new FilenameFilter() {
- @Override
- public boolean accept(File dir, String name) {
- return name.startsWith("blk_") &&
- name.endsWith(Block.METADATA_EXTENSION);
- }
- }
- );
- return res;
- }
}
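MiniDFSCluster.getAllBlockMetadataFiles replaces the deleted metaFilesInDir; the essential difference is that it must recurse, since meta files now sit in nested subdirNN directories rather than directly in the finalized directory. A minimal sketch under that assumption (the committed helper may differ):

import java.io.File;
import java.util.ArrayList;
import java.util.List;

// Hypothetical recursive analogue of the removed metaFilesInDir:
// collect every blk_*.meta file anywhere beneath dir.
static List<File> getAllBlockMetadataFiles(File dir) {
  File[] entries = dir.listFiles();
  if (entries == null) {
    return null; // not a directory, or an I/O error
  }
  List<File> found = new ArrayList<File>();
  for (File entry : entries) {
    if (entry.isDirectory()) {
      List<File> nested = getAllBlockMetadataFiles(entry);
      if (nested != null) {
        found.addAll(nested);
      }
    } else if (entry.getName().startsWith("blk_")
        && entry.getName().endsWith(".meta")) {
      found.add(entry);
    }
  }
  return found;
}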
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Fri Aug 1 20:41:05 2014
@@ -103,9 +103,10 @@ public class TestDeleteBlockPool {
fs1.delete(new Path("/alpha"), true);
// Wait till all blocks are deleted from the dn2 for bpid1.
- while ((MiniDFSCluster.getFinalizedDir(dn2StorageDir1,
- bpid1).list().length != 0) || (MiniDFSCluster.getFinalizedDir(
- dn2StorageDir2, bpid1).list().length != 0)) {
+ File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
+ File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir2, bpid1);
+ while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
+ (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
try {
Thread.sleep(3000);
} catch (Exception ignored) {
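DatanodeUtil.dirNoFilesRecursive exists for the same reason: with the nested layout, a list().length check on the finalized directory can see leftover empty subdirNN directories, so emptiness has to mean "no regular files anywhere below". A minimal sketch of that check (error handling in the committed helper may differ):

import java.io.File;

// Hypothetical sketch: true iff no regular file exists anywhere
// beneath dir; empty subdirectories are permitted to remain.
static boolean dirNoFilesRecursive(File dir) {
  File[] contents = dir.listFiles();
  if (contents == null) {
    return true; // treat unreadable or missing as empty in this sketch
  }
  for (File f : contents) {
    if (f.isFile()) {
      return false;
    }
    if (f.isDirectory() && !dirNoFilesRecursive(f)) {
      return false;
    }
  }
  return true;
}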
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1615223&r1=1615222&r2=1615223&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Fri Aug 1 20:41:05 2014
@@ -41,6 +41,7 @@ import java.net.InetSocketAddress;
import java.nio.channels.FileChannel;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
@@ -63,6 +64,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -750,15 +752,14 @@ public class TestFsck {
for (int j=0; j<=1; j++) {
File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
- File[] blocks = data_dir.listFiles();
- if (blocks == null)
+ List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
+ data_dir);
+ if (metadataFiles == null)
continue;
-
- for (int idx = 0; idx < blocks.length; idx++) {
- if (!blocks[idx].getName().startsWith("blk_")) {
- continue;
- }
- assertTrue("Cannot remove file.", blocks[idx].delete());
+ for (File metadataFile : metadataFiles) {
+ File blockFile = Block.metaToBlockFile(metadataFile);
+ assertTrue("Cannot remove file.", blockFile.delete());
+ assertTrue("Cannot remove file.", metadataFile.delete());
}
}
}