Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/04/10 22:17:51 UTC
svn commit: r1466658 [1/2] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/protocolPB/ s...
Author: szetszwo
Date: Wed Apr 10 20:17:39 2013
New Revision: 1466658
URL: http://svn.apache.org/r1466658
Log:
Merge r1464808 through r1466652 from trunk.
Added:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
- copied unchanged from r1466652, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
- copied unchanged from r1466652, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSet.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSetByHashMap.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1464808-1466652
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Apr 10 20:17:39 2013
@@ -318,6 +318,9 @@ Trunk (Unreleased)
HDFS-4586. TestDataDirs.testGetDataDirsFromURIs fails when all directories
in dfs.datanode.data.dir are invalid. (Ivan Mitic via atm)
+ HDFS-4646. createNNProxyWithClientProtocol ignores configured timeout
+ value (Jagane Sundar via cos)
+
BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS
HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
@@ -349,12 +352,18 @@ Trunk (Unreleased)
HDFS-4625. Make TestNNWithQJM#testNewNamenodeTakesOverWriter work on
Windows. (Ivan Mitic via suresh)
+ HDFS-4674. TestBPOfferService fails on Windows due to failure parsing
+ datanode data directory as URI. (Chris Nauroth via suresh)
+
Release 2.0.5-beta - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
+ HDFS-1804. Add a new block-volume device choosing policy that looks at
+ free space. (atm)
+
IMPROVEMENTS
HDFS-4222. NN is unresponsive and loses heartbeats from DNs when
@@ -384,6 +393,12 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4618. Default transaction interval for checkpoints is too low. (todd)
+ HDFS-4525. Provide an API for knowing whether a file is closed or not.
+ (SreeHari via umamahesh)
+
+ HDFS-3940. Add GSet#clear method and clear the block map when the namenode
+ is shut down. (suresh)
+
OPTIMIZATIONS
BUG FIXES
@@ -474,6 +489,17 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4658. Standby NN will log that it has received a block report "after
becoming active" (atm)
+ HDFS-3981. Fix handling of FSN lock in getBlockLocations. (Xiaobo Peng
+ and todd via todd)
+
+ HDFS-4676. TestHDFSFileSystemContract should set MiniDFSCluster variable
+ to null to free up memory. (suresh)
+
+ HDFS-4669. TestBlockPoolManager fails using IBM java. (Tian Hong Wang via
+ suresh)
+
+ HDFS-4643. Fix flakiness in TestQuorumJournalManager. (todd)
+
Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1464808-1466652
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Wed Apr 10 20:17:39 2013
@@ -1554,7 +1554,22 @@ public class DFSClient implements java.i
UnresolvedPathException.class);
}
}
-
+
+ /**
+ * Get the close status of a file.
+ * @param src the path to the file
+ * @return true if the file is already closed
+ */
+ public boolean isFileClosed(String src) throws IOException{
+ checkOpen();
+ try {
+ return namenode.isFileClosed(src);
+ } catch(RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
/**
* Get the file info for a specific file or directory. If src
* refers to a symlink then the FileStatus of the link is returned.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Apr 10 20:17:39 2013
@@ -369,6 +369,10 @@ public class DFSConfigKeys extends Commo
public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
public static final String DFS_DATANODE_FSDATASET_FACTORY_KEY = "dfs.datanode.fsdataset.factory";
public static final String DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY = "dfs.datanode.fsdataset.volume.choosing.policy";
+ public static final String DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_BALANCED_SPACE_THRESHOLD_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold";
+ public static final long DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_BALANCED_SPACE_THRESHOLD_DEFAULT = 1024L * 1024L * 1024L * 10L; // 10 GB
+ public static final String DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_BALANCED_SPACE_PREFERENCE_PERCENT_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-percent";
+ public static final float DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_BALANCED_SPACE_PREFERENCE_PERCENT_DEFAULT = 0.75f;
public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
public static final String DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
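
The two new keys above tune the AvailableSpaceVolumeChoosingPolicy added in this
merge (HDFS-1804). A minimal sketch of selecting and tuning the policy through the
Configuration API; the wiring assumes the datanode instantiates whatever class is
named by the existing DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY, and the
values are illustrative, not recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy;

public class VolumePolicySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // pick the new policy instead of the default round-robin one
    conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
        AvailableSpaceVolumeChoosingPolicy.class.getName());
    // treat volumes within 20 GB of each other as "balanced" (default: 10 GB)
    conf.setLong(
        DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_BALANCED_SPACE_THRESHOLD_KEY,
        20L * 1024L * 1024L * 1024L);
    // bias 85% of new block allocations toward the volumes with more free space
    conf.setFloat(
        DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_BALANCED_SPACE_PREFERENCE_PERCENT_KEY,
        0.85f);
  }
}
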
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Apr 10 20:17:39 2013
@@ -507,7 +507,7 @@ public class DFSUtil {
// Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
// across all of the configured nameservices and namenodes.
- Map<String, Map<String, InetSocketAddress>> ret = Maps.newHashMap();
+ Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
for (String nsId : emptyAsSingletonNull(nameserviceIds)) {
Map<String, InetSocketAddress> isas =
getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
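
The switch from Maps.newHashMap to Maps.newLinkedHashMap makes the
nameservice-to-address map iterate in insertion order on every JVM; this appears to
be part of the HDFS-4669 fix below, where IBM Java's different HashMap iteration
order broke TestBlockPoolManager. A small illustration, assuming only java.util and
Guava's Maps factory:

import java.util.Map;
import com.google.common.collect.Maps;

public class OrderSketch {
  public static void main(String[] args) {
    // HashMap iteration order is unspecified and differs between HotSpot and J9;
    // LinkedHashMap always iterates in insertion order.
    Map<String, String> addrs = Maps.newLinkedHashMap();
    addrs.put("ns1", "nn-a:8020");  // hypothetical nameservice entries
    addrs.put("ns2", "nn-b:8020");
    for (String ns : addrs.keySet()) {
      System.out.println(ns);       // always ns1 then ns2
    }
  }
}
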
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed Apr 10 20:17:39 2013
@@ -968,4 +968,17 @@ public class DistributedFileSystem exten
String fromSnapshot, String toSnapshot) throws IOException {
return dfs.getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot);
}
+
+ /**
+ * Get the close status of a file.
+ * @param src The path to the file
+ *
+ * @return true if the file is closed
+ * @throws FileNotFoundException if the file does not exist.
+ * @throws IOException If an I/O error occurred
+ */
+ public boolean isFileClosed(Path src) throws IOException {
+ return dfs.isFileClosed(getPathName(src));
+ }
+
}
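
A usage sketch for the new client-facing API: a reader that polls until a writer's
file is closed and its length is final. The path and the sleep interval are
hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class IsFileClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
    Path src = new Path("/tmp/writer-output");   // hypothetical path
    while (!fs.isFileClosed(src)) {
      Thread.sleep(1000);  // the underlying RPC is @Idempotent, so retrying is safe
    }
    // file is closed: its block list and length are now final
  }
}
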
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java Wed Apr 10 20:17:39 2013
@@ -260,7 +260,9 @@ public class NameNodeProxies {
final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
ClientNamenodeProtocolPB.class, version, address, ugi, conf,
- NetUtils.getDefaultSocketFactory(conf), 0, defaultPolicy).getProxy();
+ NetUtils.getDefaultSocketFactory(conf),
+ org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy)
+ .getProxy();
if (withRetries) { // create the proxy with retries
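
Before this change (HDFS-4646) the proxy was built with a hardcoded rpcTimeout of
0, so any configured client timeout was silently ignored. Client.getTimeout(conf)
in this code line returns a timeout only when IPC pinging is disabled, in which
case the ping interval doubles as the RPC timeout; a hedged sketch of opting in
(the key names are the common IPC client keys, treat them as assumptions):

Configuration conf = new Configuration();
conf.setBoolean("ipc.client.ping", false);  // assumed: timeout applies only with ping off
conf.setInt("ipc.ping.interval", 60000);    // assumed: reused as a 60 s RPC timeout
// createNNProxyWithClientProtocol now passes this value to RPC.getProtocolProxy
// instead of 0 (wait forever).
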
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Apr 10 20:17:39 2013
@@ -778,7 +778,21 @@ public interface ClientProtocol {
@Idempotent
public HdfsFileStatus getFileInfo(String src) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException;
-
+
+ /**
+ * Get the close status of a file.
+ * @param src The string representation of the path to the file
+ *
+ * @return true if the file is closed
+ * @throws AccessControlException permission denied
+ * @throws FileNotFoundException file <code>src</code> is not found
+ * @throws UnresolvedLinkException if the path contains a symlink.
+ * @throws IOException If an I/O error occurred
+ */
+ @Idempotent
+ public boolean isFileClosed(String src) throws AccessControlException,
+ FileNotFoundException, UnresolvedLinkException, IOException;
+
/**
* Get the file info for a specific file or directory. If the path
* refers to a symlink then the FileStatus of the symlink is returned.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java Wed Apr 10 20:17:39 2013
@@ -97,7 +97,8 @@ public class LayoutVersion {
"Serialize block lists with delta-encoded variable length ints, " +
"add OP_UPDATE_BLOCKS"),
RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT),
- SNAPSHOT(-42, -40, "Support for snapshot feature", false);
+ ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false),
+ SNAPSHOT(-43, -42, "Support for snapshot feature", false);
final int lv;
final int ancestorLV;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Wed Apr 10 20:17:39 2013
@@ -90,6 +90,8 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -984,4 +986,17 @@ public class ClientNamenodeProtocolServe
throw new ServiceException(e);
}
}
+
+ @Override
+ public IsFileClosedResponseProto isFileClosed(
+ RpcController controller, IsFileClosedRequestProto request)
+ throws ServiceException {
+ try {
+ boolean result = server.isFileClosed(request.getSrc());
+ return IsFileClosedResponseProto.newBuilder().setResult(result).build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Wed Apr 10 20:17:39 2013
@@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
@@ -861,6 +862,19 @@ public class ClientNamenodeProtocolTrans
throw ProtobufHelper.getRemoteException(e);
}
}
+
+
+ @Override
+ public boolean isFileClosed(String src) throws AccessControlException,
+ FileNotFoundException, UnresolvedLinkException, IOException {
+ IsFileClosedRequestProto req = IsFileClosedRequestProto.newBuilder()
+ .setSrc(src).build();
+ try {
+ return rpcProxy.isFileClosed(null, req).getResult();
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
@Override
public Object getUnderlyingProxyObject() {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Wed Apr 10 20:17:39 2013
@@ -3186,4 +3186,7 @@ assert storedBlock.findDatanode(dn) < 0
OK
}
+ public void shutdown() {
+ blocksMap.close();
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java Wed Apr 10 20:17:39 2013
@@ -67,7 +67,7 @@ class BlocksMap {
void close() {
- // Empty blocks once GSet#clear is implemented (HDFS-3940)
+ blocks.clear();
}
BlockCollection getBlockCollection(Block b) {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java Wed Apr 10 20:17:39 2013
@@ -160,8 +160,8 @@ class BlockPoolManager {
Map<String, Map<String, InetSocketAddress>> addrMap) throws IOException {
assert Thread.holdsLock(refreshNamenodesLock);
- Set<String> toRefresh = Sets.newHashSet();
- Set<String> toAdd = Sets.newHashSet();
+ Set<String> toRefresh = Sets.newLinkedHashSet();
+ Set<String> toAdd = Sets.newLinkedHashSet();
Set<String> toRemove;
synchronized (this) {
@@ -239,4 +239,4 @@ class BlockPoolManager {
protected BPOfferService createBPOS(List<InetSocketAddress> nnAddrs) {
return new BPOfferService(nnAddrs, dn);
}
-}
\ No newline at end of file
+}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Apr 10 20:17:39 2013
@@ -2388,4 +2388,8 @@ public class FSDirectory implements Clos
inode.setLocalName(name.getBytes());
}
}
+
+ void shutdown() {
+ nameCache.reset();
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed Apr 10 20:17:39 2013
@@ -661,6 +661,7 @@ public class FSEditLog implements LogsPu
*/
public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
AddOp op = AddOp.getInstance(cache.get())
+ .setInodeId(newNode.getId())
.setPath(path)
.setReplication(newNode.getFileReplication())
.setModificationTime(newNode.getModificationTime())
@@ -702,6 +703,7 @@ public class FSEditLog implements LogsPu
*/
public void logMkDir(String path, INode newNode) {
MkdirOp op = MkdirOp.getInstance(cache.get())
+ .setInodeId(newNode.getId())
.setPath(path)
.setTimestamp(newNode.getModificationTime())
.setPermissionStatus(newNode.getPermissionStatus());
@@ -819,6 +821,7 @@ public class FSEditLog implements LogsPu
void logSymlink(String path, String value, long mtime,
long atime, INodeSymlink node) {
SymlinkOp op = SymlinkOp.getInstance(cache.get())
+ .setId(node.getId())
.setPath(path)
.setValue(value)
.setModificationTime(mtime)
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Wed Apr 10 20:17:39 2013
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -174,7 +175,7 @@ public class FSEditLogLoader {
}
}
try {
- long inodeId = applyEditLogOp(op, fsDir, in.getVersion());
+ long inodeId = applyEditLogOp(op, fsDir, in.getVersion(), lastInodeId);
if (lastInodeId < inodeId) {
lastInodeId = inodeId;
}
@@ -230,9 +231,30 @@ public class FSEditLogLoader {
return numEdits;
}
+ // allocate and update last allocated inode id
+ private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
+ long lastInodeId) throws IOException {
+ long inodeId = inodeIdFromOp;
+
+ if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
+ if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+ throw new IOException("The layout version " + logVersion
+ + " supports inodeId but gave bogus inodeId");
+ }
+ inodeId = fsNamesys.allocateNewInodeId();
+ } else {
+ // need to reset lastInodeId: fsnamesys initially gets lastInodeId from the
+ // fsimage, but the editlog captures more recent inodeId allocations
+ if (inodeId > lastInodeId) {
+ fsNamesys.resetLastInodeId(inodeId);
+ }
+ }
+ return inodeId;
+ }
+
@SuppressWarnings("deprecation")
private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
- int logVersion) throws IOException {
+ int logVersion, long lastInodeId) throws IOException {
long inodeId = INodeId.GRANDFATHER_INODE_ID;
if (LOG.isTraceEnabled()) {
LOG.trace("replaying edit log: " + op);
@@ -265,7 +287,8 @@ public class FSEditLogLoader {
assert addCloseOp.blocks.length == 0;
// add to the file tree
- inodeId = fsNamesys.allocateNewInodeId();
+ inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
+ lastInodeId);
newFile = fsDir.unprotectedAddFile(inodeId,
addCloseOp.path, addCloseOp.permissions, replication,
addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
@@ -373,7 +396,8 @@ public class FSEditLogLoader {
}
case OP_MKDIR: {
MkdirOp mkdirOp = (MkdirOp)op;
- inodeId = fsNamesys.allocateNewInodeId();
+ inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion,
+ lastInodeId);
fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions,
mkdirOp.timestamp);
break;
@@ -427,7 +451,8 @@ public class FSEditLogLoader {
}
case OP_SYMLINK: {
SymlinkOp symlinkOp = (SymlinkOp)op;
- inodeId = fsNamesys.allocateNewInodeId();
+ inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion,
+ lastInodeId);
fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path,
symlinkOp.value, symlinkOp.mtime,
symlinkOp.atime, symlinkOp.permissionStatus);
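
A short worked reading of the new getAndUpdateLastInodeId helper above, with
illustrative values:

- old-layout op: the op carries GRANDFATHER_INODE_ID and the log version predates
  ADD_INODE_ID, so a fresh id is allocated, exactly as before this change.
- new-layout op: the op carries inodeId 1007 while lastInodeId is 1005, so
  resetLastInodeId(1007) advances the namesystem counter and ids never regress.
- corrupt input: GRANDFATHER_INODE_ID on a log version that supports ADD_INODE_ID
  raises the "bogus inodeId" IOException.
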
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Wed Apr 10 20:17:39 2013
@@ -164,6 +164,7 @@ public abstract class FSEditLogOp {
@SuppressWarnings("unchecked")
static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp {
int length;
+ long inodeId;
String path;
short replication;
long mtime;
@@ -178,6 +179,11 @@ public abstract class FSEditLogOp {
super(opCode);
assert(opCode == OP_ADD || opCode == OP_CLOSE);
}
+
+ <T extends AddCloseOp> T setInodeId(long inodeId) {
+ this.inodeId = inodeId;
+ return (T)this;
+ }
<T extends AddCloseOp> T setPath(String path) {
this.path = path;
@@ -241,6 +247,7 @@ public abstract class FSEditLogOp {
@Override
public
void writeFields(DataOutputStream out) throws IOException {
+ FSImageSerialization.writeLong(inodeId, out);
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeShort(replication, out);
FSImageSerialization.writeLong(mtime, out);
@@ -261,6 +268,12 @@ public abstract class FSEditLogOp {
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
+ if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+ this.inodeId = in.readLong();
+ } else {
+ // The inodeId should be updated when this editLogOp is applied
+ this.inodeId = INodeId.GRANDFATHER_INODE_ID;
+ }
if ((-17 < logVersion && length != 4) ||
(logVersion <= -17 && length != 5 && !LayoutVersion.supports(
Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
@@ -333,6 +346,8 @@ public abstract class FSEditLogOp {
StringBuilder builder = new StringBuilder();
builder.append("[length=");
builder.append(length);
+ builder.append(", inodeId=");
+ builder.append(inodeId);
builder.append(", path=");
builder.append(path);
builder.append(", replication=");
@@ -363,6 +378,8 @@ public abstract class FSEditLogOp {
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.valueOf(length).toString());
+ XMLUtils.addSaxString(contentHandler, "INODEID",
+ Long.valueOf(inodeId).toString());
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "REPLICATION",
Short.valueOf(replication).toString());
@@ -382,6 +399,7 @@ public abstract class FSEditLogOp {
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.valueOf(st.getValue("LENGTH"));
+ this.inodeId = Long.valueOf(st.getValue("INODEID"));
this.path = st.getValue("PATH");
this.replication = Short.valueOf(st.getValue("REPLICATION"));
this.mtime = Long.valueOf(st.getValue("MTIME"));
@@ -913,6 +931,7 @@ public abstract class FSEditLogOp {
static class MkdirOp extends FSEditLogOp {
int length;
+ long inodeId;
String path;
long timestamp;
PermissionStatus permissions;
@@ -925,6 +944,11 @@ public abstract class FSEditLogOp {
return (MkdirOp)cache.get(OP_MKDIR);
}
+ MkdirOp setInodeId(long inodeId) {
+ this.inodeId = inodeId;
+ return this;
+ }
+
MkdirOp setPath(String path) {
this.path = path;
return this;
@@ -943,6 +967,7 @@ public abstract class FSEditLogOp {
@Override
public
void writeFields(DataOutputStream out) throws IOException {
+ FSImageSerialization.writeLong(inodeId, out);
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeLong(timestamp, out); // mtime
FSImageSerialization.writeLong(timestamp, out); // atime, unused at this
@@ -959,6 +984,12 @@ public abstract class FSEditLogOp {
&& !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
throw new IOException("Incorrect data format. Mkdir operation.");
}
+ if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+ this.inodeId = FSImageSerialization.readLong(in);
+ } else {
+ // This id should be updated when this editLogOp is applied
+ this.inodeId = INodeId.GRANDFATHER_INODE_ID;
+ }
this.path = FSImageSerialization.readString(in);
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.timestamp = FSImageSerialization.readLong(in);
@@ -985,6 +1016,8 @@ public abstract class FSEditLogOp {
StringBuilder builder = new StringBuilder();
builder.append("MkdirOp [length=");
builder.append(length);
+ builder.append(", inodeId=");
+ builder.append(inodeId);
builder.append(", path=");
builder.append(path);
builder.append(", timestamp=");
@@ -1003,6 +1036,8 @@ public abstract class FSEditLogOp {
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.valueOf(length).toString());
+ XMLUtils.addSaxString(contentHandler, "INODEID",
+ Long.valueOf(inodeId).toString());
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.valueOf(timestamp).toString());
@@ -1011,6 +1046,7 @@ public abstract class FSEditLogOp {
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.valueOf(st.getValue("LENGTH"));
+ this.inodeId = Long.valueOf(st.getValue("INODEID"));
this.path = st.getValue("PATH");
this.timestamp = Long.valueOf(st.getValue("TIMESTAMP"));
this.permissions =
@@ -1489,6 +1525,7 @@ public abstract class FSEditLogOp {
static class SymlinkOp extends FSEditLogOp {
int length;
+ long inodeId;
String path;
String value;
long mtime;
@@ -1503,6 +1540,11 @@ public abstract class FSEditLogOp {
return (SymlinkOp)cache.get(OP_SYMLINK);
}
+ SymlinkOp setId(long inodeId) {
+ this.inodeId = inodeId;
+ return this;
+ }
+
SymlinkOp setPath(String path) {
this.path = path;
return this;
@@ -1531,6 +1573,7 @@ public abstract class FSEditLogOp {
@Override
public
void writeFields(DataOutputStream out) throws IOException {
+ FSImageSerialization.writeLong(inodeId, out);
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeString(value, out);
FSImageSerialization.writeLong(mtime, out);
@@ -1548,6 +1591,12 @@ public abstract class FSEditLogOp {
+ "symlink operation.");
}
}
+ if (LayoutVersion.supports(Feature.ADD_INODE_ID, logVersion)) {
+ this.inodeId = FSImageSerialization.readLong(in);
+ } else {
+ // This id should be updated when the editLogOp is applied
+ this.inodeId = INodeId.GRANDFATHER_INODE_ID;
+ }
this.path = FSImageSerialization.readString(in);
this.value = FSImageSerialization.readString(in);
@@ -1566,6 +1615,8 @@ public abstract class FSEditLogOp {
StringBuilder builder = new StringBuilder();
builder.append("SymlinkOp [length=");
builder.append(length);
+ builder.append(", inodeId=");
+ builder.append(inodeId);
builder.append(", path=");
builder.append(path);
builder.append(", value=");
@@ -1588,6 +1639,8 @@ public abstract class FSEditLogOp {
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.valueOf(length).toString());
+ XMLUtils.addSaxString(contentHandler, "INODEID",
+ Long.valueOf(inodeId).toString());
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "VALUE", value);
XMLUtils.addSaxString(contentHandler, "MTIME",
@@ -1599,6 +1652,7 @@ public abstract class FSEditLogOp {
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.valueOf(st.getValue("LENGTH"));
+ this.inodeId = Long.valueOf(st.getValue("INODEID"));
this.path = st.getValue("PATH");
this.value = st.getValue("VALUE");
this.mtime = Long.valueOf(st.getValue("MTIME"));
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Wed Apr 10 20:17:39 2013
@@ -275,6 +275,20 @@ public class FSImageFormat {
snapshotMap = namesystem.getSnapshotManager().read(in, this);
}
+ // read the last allocated inode id in the fsimage
+ if (LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion)) {
+ long lastInodeId = in.readLong();
+ namesystem.resetLastInodeId(lastInodeId);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("load last allocated InodeId from fsimage:" + lastInodeId);
+ }
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Old layout version doesn't have inode id."
+ + " Will assign new id for each inode.");
+ }
+ }
+
// read compression related info
FSImageCompression compression;
if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
@@ -285,8 +299,7 @@ public class FSImageFormat {
in = compression.unwrapInputStream(fin);
LOG.info("Loading image file " + curFile + " using " + compression);
- // reset INodeId. TODO: remove this after inodeId is persisted in fsimage
- namesystem.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
+
// load all inodes
LOG.info("Number of files = " + numFiles);
if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
@@ -356,7 +369,7 @@ public class FSImageFormat {
* @throws IOException
*/
private void loadLocalNameINodes(long numFiles, DataInput in)
- throws IOException {
+ throws IOException {
assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
getLayoutVersion());
assert numFiles > 0;
@@ -546,7 +559,8 @@ public class FSImageFormat {
INode loadINode(final byte[] localName, boolean isSnapshotINode,
DataInput in) throws IOException {
final int imgVersion = getLayoutVersion();
- final long inodeId = namesystem.allocateNewInodeId();
+ long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ?
+ in.readLong() : namesystem.allocateNewInodeId();
final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort());
@@ -662,8 +676,8 @@ public class FSImageFormat {
LOG.info("Number of files under construction = " + size);
for (int i = 0; i < size; i++) {
- INodeFileUnderConstruction cons
- = FSImageSerialization.readINodeUnderConstruction(in);
+ INodeFileUnderConstruction cons = FSImageSerialization
+ .readINodeUnderConstruction(in, namesystem, getLayoutVersion());
// verify that file exists in namespace
String path = cons.getLocalName();
@@ -804,6 +818,8 @@ public class FSImageFormat {
out.writeLong(fsDir.rootDir.numItemsInTree());
out.writeLong(sourceNamesystem.getGenerationStamp());
out.writeLong(context.getTxId());
+ out.writeLong(sourceNamesystem.getLastInodeId());
+
sourceNamesystem.getSnapshotManager().write(out);
// write compression info and set up compressed stream
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Wed Apr 10 20:17:39 2013
@@ -30,6 +30,8 @@ import org.apache.hadoop.fs.permission.P
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -103,8 +105,11 @@ public class FSImageSerialization {
// from the input stream
//
static INodeFileUnderConstruction readINodeUnderConstruction(
- DataInput in) throws IOException {
+ DataInput in, FSNamesystem fsNamesys, int imgVersion)
+ throws IOException {
byte[] name = readBytes(in);
+ long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ? in
+ .readLong() : fsNamesys.allocateNewInodeId();
short blockReplication = in.readShort();
long modificationTime = in.readLong();
long preferredBlockSize = in.readLong();
@@ -132,10 +137,16 @@ public class FSImageSerialization {
int numLocs = in.readInt();
assert numLocs == 0 : "Unexpected block locations";
- //TODO: get inodeId from fsimage after inodeId is persisted
- return new INodeFileUnderConstruction(
- INodeId.GRANDFATHER_INODE_ID, name, blockReplication, modificationTime,
- preferredBlockSize, blocks, perm, clientName, clientMachine, null);
+ return new INodeFileUnderConstruction(inodeId,
+ name,
+ blockReplication,
+ modificationTime,
+ preferredBlockSize,
+ blocks,
+ perm,
+ clientName,
+ clientMachine,
+ null);
}
// Helper function that writes an INodeUnderConstruction
@@ -146,6 +157,7 @@ public class FSImageSerialization {
String path)
throws IOException {
writeString(path, out);
+ out.writeLong(cons.getId());
out.writeShort(cons.getFileReplication());
out.writeLong(cons.getModificationTime());
out.writeLong(cons.getPreferredBlockSize());
@@ -168,6 +180,7 @@ public class FSImageSerialization {
public static void writeINodeFile(INodeFile file, DataOutput out,
boolean writeUnderConstruction) throws IOException {
writeLocalName(file, out);
+ out.writeLong(file.getId());
out.writeShort(file.getFileReplication());
out.writeLong(file.getModificationTime());
out.writeLong(file.getAccessTime());
@@ -198,6 +211,7 @@ public class FSImageSerialization {
public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
throws IOException {
writeLocalName(node, out);
+ out.writeLong(node.getId());
out.writeShort(0); // replication
out.writeLong(node.getModificationTime());
out.writeLong(0); // access time
@@ -224,6 +238,7 @@ public class FSImageSerialization {
private static void writeINodeSymlink(INodeSymlink node, DataOutput out)
throws IOException {
writeLocalName(node, out);
+ out.writeLong(node.getId());
out.writeShort(0); // replication
out.writeLong(0); // modification time
out.writeLong(0); // access time
@@ -239,6 +254,7 @@ public class FSImageSerialization {
boolean writeUnderConstruction, ReferenceMap referenceMap
) throws IOException {
writeLocalName(ref, out);
+ out.writeLong(ref.getId());
out.writeShort(0); // replication
out.writeLong(0); // modification time
out.writeLong(0); // access time
@@ -403,4 +419,4 @@ public class FSImageSerialization {
}
return ret;
}
-}
\ No newline at end of file
+}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Apr 10 20:17:39 2013
@@ -1391,14 +1391,14 @@ public class FSNamesystem implements Nam
if (!iip.isSnapshot() //snapshots are readonly, so don't update atime.
&& doAccessTime && isAccessTimeSupported()) {
final long now = now();
- if (now <= inode.getAccessTime() + getAccessTimePrecision()) {
+ if (now > inode.getAccessTime() + getAccessTimePrecision()) {
// if we have to set access time but we only have the readlock, then
// restart this entire operation with the writeLock.
if (isReadOp) {
continue;
}
+ dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshot());
}
- dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshot());
}
final long fileSize = iip.getPathSnapshot() != null?
inode.computeFileSize(iip.getPathSnapshot())
@@ -2992,6 +2992,33 @@ public class FSNamesystem implements Nam
logAuditEvent(true, "getfileinfo", src);
return stat;
}
+
+ /**
+ * Returns true if the file is closed
+ */
+ boolean isFileClosed(String src)
+ throws AccessControlException, UnresolvedLinkException,
+ StandbyException, IOException {
+ FSPermissionChecker pc = getPermissionChecker();
+ checkOperation(OperationCategory.READ);
+ readLock();
+ try {
+ checkOperation(OperationCategory.READ);
+ if (isPermissionEnabled) {
+ checkTraverse(pc, src);
+ }
+ return !INodeFile.valueOf(dir.getINode(src), src).isUnderConstruction();
+ } catch (AccessControlException e) {
+ if (isAuditEnabled() && isExternalInvocation()) {
+ logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+ getRemoteIp(),
+ "isFileClosed", src, null, null);
+ }
+ throw e;
+ } finally {
+ readUnlock();
+ }
+ }
/**
* Create all the necessary directories
@@ -4828,8 +4855,15 @@ public class FSNamesystem implements Nam
* shutdown FSNamesystem
*/
void shutdown() {
- if (mbeanName != null)
+ if (mbeanName != null) {
MBeans.unregister(mbeanName);
+ }
+ if (dir != null) {
+ dir.shutdown();
+ }
+ if (blockManager != null) {
+ blockManager.shutdown();
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Wed Apr 10 20:17:39 2013
@@ -692,7 +692,12 @@ class NameNodeRpcServer implements Namen
metrics.incrFileInfoOps();
return namesystem.getFileInfo(src, true);
}
-
+
+ @Override // ClientProtocol
+ public boolean isFileClosed(String src) throws IOException{
+ return namesystem.isFileClosed(src);
+ }
+
@Override // ClientProtocol
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
metrics.incrFileInfoOps();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Wed Apr 10 20:17:39 2013
@@ -171,6 +171,10 @@ class ImageLoaderCurrent implements Imag
v.visit(ImageElement.NUM_SNAPSHOTTABLE_DIRS, in.readInt());
}
+ if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
+ v.visit(ImageElement.LAST_INODE_ID, in.readLong());
+ }
+
if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
boolean isCompressed = in.readBoolean();
v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
@@ -577,6 +581,9 @@ class ImageLoaderCurrent implements Imag
}
v.visit(ImageElement.INODE_PATH, pathName);
+ if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
+ v.visit(ImageElement.INODE_ID, in.readLong());
+ }
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java Wed Apr 10 20:17:39 2013
@@ -81,6 +81,9 @@ abstract class ImageVisitor {
DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
TRANSACTION_ID,
+ LAST_INODE_ID,
+ INODE_ID,
+
SNAPSHOT_COUNTER,
NUM_SNAPSHOTS_TOTAL,
NUM_SNAPSHOTTABLE_DIRS,
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSet.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSet.java Wed Apr 10 20:17:39 2013
@@ -81,4 +81,6 @@ public interface GSet<K, E extends K> ex
* @throws NullPointerException if key == null.
*/
E remove(K key);
+
+ void clear();
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSetByHashMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSetByHashMap.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSetByHashMap.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSetByHashMap.java Wed Apr 10 20:17:39 2013
@@ -65,4 +65,9 @@ public class GSetByHashMap<K, E extends
public Iterator<E> iterator() {
return m.values().iterator();
}
+
+ @Override
+ public void clear() {
+ m.clear();
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java Wed Apr 10 20:17:39 2013
@@ -85,7 +85,6 @@ public class LightWeightGSet<K, E extend
if (LOG.isDebugEnabled()) {
LOG.debug("recommended=" + recommended_length + ", actual=" + actual);
}
-
entries = new LinkedElement[actual];
hash_mask = entries.length - 1;
}
@@ -329,13 +328,18 @@ public class LightWeightGSet<K, E extend
final int exponent = e2 < 0? 0: e2 > 30? 30: e2;
final int c = 1 << exponent;
- if (LightWeightGSet.LOG.isDebugEnabled()) {
- LOG.debug("Computing capacity for map " + mapName);
- LOG.debug("VM type = " + vmBit + "-bit");
- LOG.debug(percentage + "% max memory = "
- + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1));
- LOG.debug("capacity = 2^" + exponent + " = " + c + " entries");
- }
+ LOG.info("Computing capacity for map " + mapName);
+ LOG.info("VM type = " + vmBit + "-bit");
+ LOG.info(percentage + "% max memory = "
+ + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1));
+ LOG.info("capacity = 2^" + exponent + " = " + c + " entries");
return c;
}
+
+ public void clear() {
+ for (int i = 0; i < entries.length; i++) {
+ entries[i] = null;
+ }
+ size = 0;
+ }
}
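
For reference, a minimal, self-contained sketch of the clear() contract that
the GSet, GSetByHashMap and LightWeightGSet changes above introduce. The
Element type, the capacity of 16, and the class name are invented for
illustration; LightWeightGSet requires its elements to implement
LightWeightGSet.LinkedElement.

    import org.apache.hadoop.hdfs.util.GSet;
    import org.apache.hadoop.hdfs.util.LightWeightGSet;

    public class GSetClearExample {
      // Hypothetical element type satisfying LightWeightGSet's requirements.
      static class Element implements LightWeightGSet.LinkedElement {
        final int id;
        LightWeightGSet.LinkedElement next;
        Element(int id) { this.id = id; }
        @Override public boolean equals(Object o) {
          return o instanceof Element && ((Element) o).id == id;
        }
        @Override public int hashCode() { return id; }
        @Override public void setNext(LightWeightGSet.LinkedElement e) { next = e; }
        @Override public LightWeightGSet.LinkedElement getNext() { return next; }
      }

      public static void main(String[] args) {
        GSet<Element, Element> set = new LightWeightGSet<Element, Element>(16);
        set.put(new Element(1));
        set.put(new Element(2));
        set.clear();  // nulls every bucket and resets size, per the diff above
        assert set.size() == 0 && !set.contains(new Element(1));
      }
    }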
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1464808-1466652
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Wed Apr 10 20:17:39 2013
@@ -347,6 +347,14 @@ message GetFileInfoResponseProto {
optional HdfsFileStatusProto fs = 1;
}
+message IsFileClosedRequestProto {
+ required string src = 1;
+}
+
+message IsFileClosedResponseProto {
+ required bool result = 1;
+}
+
message GetFileLinkInfoRequestProto {
required string src = 1;
}
@@ -566,4 +574,6 @@ service ClientNamenodeProtocol {
returns(DeleteSnapshotResponseProto);
rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto)
returns(GetSnapshotDiffReportResponseProto);
+ rpc isFileClosed(IsFileClosedRequestProto)
+ returns(IsFileClosedResponseProto);
}
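
Assuming standard protobuf-java codegen for the two messages above, the
client-side call for the new rpc would look roughly like the sketch below.
The rpcProxy field and ProtobufHelper.getRemoteException follow the pattern
of the existing translator methods; this is an illustration, not the
committed translator code.

    // Sketch only: invoking the new isFileClosed RPC from a PB translator.
    // Assumes the generated IsFileClosedRequestProto/ResponseProto classes
    // and com.google.protobuf.ServiceException.
    public boolean isFileClosed(String src) throws IOException {
      IsFileClosedRequestProto req = IsFileClosedRequestProto.newBuilder()
          .setSrc(src)
          .build();
      try {
        return rpcProxy.isFileClosed(null, req).getResult();
      } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
      }
    }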
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Apr 10 20:17:39 2013
@@ -1231,4 +1231,32 @@
</description>
</property>
+<property>
+ <name>dfs.datanode.fsdataset.volume.choosing.balanced-space-threshold</name>
+ <value>10737418240</value> <!-- 10 GB -->
+ <description>
+ Only used when the dfs.datanode.fsdataset.volume.choosing.policy is set to
+ org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy.
+ This setting controls by how many bytes of free disk space the DN volumes
+ are allowed to differ before they are considered imbalanced. If the free
+ space of all the volumes is within this range of each other, the volumes
+ are considered balanced and block assignments are done on a purely
+ round-robin basis.
+ </description>
+</property>
+
+<property>
+ <name>dfs.datanode.fsdataset.volume.choosing.balanced-space-preference-percent</name>
+ <value>0.75f</value>
+ <description>
+ Only used when the dfs.datanode.fsdataset.volume.choosing.policy is set to
+ org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy.
+ This setting controls what fraction of new block allocations is sent to
+ volumes with more available disk space than others. The value should be in
+ the range 0.0 - 1.0, and in practice 0.5 - 1.0, since there is no reason to
+ prefer that volumes with less available disk space receive more block
+ allocations.
+ </description>
+</property>
+
</configuration>
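
The same two knobs can also be set programmatically. The key strings and
default values below come from the properties above; setLong and setFloat
are the stock org.apache.hadoop.conf.Configuration accessors (sketch only).

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.datanode.fsdataset.volume.choosing.policy",
        "org.apache.hadoop.hdfs.server.datanode.fsdataset."
            + "AvailableSpaceVolumeChoosingPolicy");
    // Volumes whose free space differs by less than 10 GB are treated as
    // balanced (the default shown above).
    conf.setLong(
        "dfs.datanode.fsdataset.volume.choosing.balanced-space-threshold",
        10L * 1024 * 1024 * 1024);
    // Send 75% of new block allocations to the roomier volumes (default above).
    conf.setFloat(
        "dfs.datanode.fsdataset.volume.choosing.balanced-space-preference-percent",
        0.75f);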
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1464808-1466652
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1464808-1466652
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1464808-1466652
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1464808-1466652
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Wed Apr 10 20:17:39 2013
@@ -762,4 +762,27 @@ public class TestDistributedFileSystem {
}
}
}
+
+ @Test(timeout=60000)
+ public void testFileCloseStatus() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+ DistributedFileSystem fs = cluster.getFileSystem();
+ try {
+ // create a new file.
+ Path file = new Path("/simpleFlush.dat");
+ FSDataOutputStream output = fs.create(file);
+ // write to file
+ output.writeBytes("Some test data");
+ output.flush();
+ assertFalse("File status should be open", fs.isFileClosed(file));
+ output.close();
+ assertTrue("File status should be closed", fs.isFileClosed(file));
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java Wed Apr 10 20:17:39 2013
@@ -46,6 +46,7 @@ public class TestHDFSFileSystemContract
protected void tearDown() throws Exception {
super.tearDown();
cluster.shutdown();
+ cluster = null;
}
@Override
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java Wed Apr 10 20:17:39 2013
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Random;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -36,8 +37,11 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.MockitoUtil;
import org.apache.hadoop.util.Time;
import org.junit.Test;
+import org.mockito.Mockito;
/**
* This class tests the access time on files.
@@ -273,6 +277,37 @@ public class TestSetTimes {
cluster.shutdown();
}
}
+
+ /**
+ * Test that when access time updates are not needed, the FSNamesystem
+ * write lock is not taken by getBlockLocations.
+ * Regression test for HDFS-3981.
+ */
+ @Test(timeout=60000)
+ public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100*1000);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
+ .build();
+ ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
+ try {
+ // Create empty file in the FSN.
+ Path p = new Path("/empty-file");
+ DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);
+
+ // getBlockLocations() should not need the write lock, since we just created
+ // the file (and thus its access time is already within the 100-second
+ // accesstime precision configured above).
+ MockitoUtil.doThrowWhenCallStackMatches(
+ new AssertionError("Should not need write lock"),
+ ".*getBlockLocations.*")
+ .when(spyLock).writeLock();
+ cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
+ } finally {
+ cluster.shutdown();
+ }
+ }
public static void main(String[] args) throws Exception {
new TestSetTimes().testTimes();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java Wed Apr 10 20:17:39 2013
@@ -74,6 +74,8 @@ public class TestQuorumJournalManager {
private Configuration conf;
private QuorumJournalManager qjm;
private List<AsyncLogger> spies;
+
+ private List<QuorumJournalManager> toClose = Lists.newLinkedList();
static {
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
@@ -98,11 +100,26 @@ public class TestQuorumJournalManager {
@After
public void shutdown() throws IOException {
+ IOUtils.cleanup(LOG, toClose.toArray(new Closeable[0]));
+
+ // Should not leak clients between tests -- this can cause flaky tests.
+ // (See HDFS-4643)
+ GenericTestUtils.assertNoThreadsMatching(".*IPC Client.*");
+
if (cluster != null) {
cluster.shutdown();
}
}
+ /**
+ * Enqueue a QJM for closing during shutdown. This makes the code a little
+ * easier to follow, since fewer try..finally blocks are needed.
+ */
+ private QuorumJournalManager closeLater(QuorumJournalManager qjm) {
+ toClose.add(qjm);
+ return qjm;
+ }
+
@Test
public void testSingleWriter() throws Exception {
writeSegment(cluster, qjm, 1, 3, true);
@@ -119,8 +136,8 @@ public class TestQuorumJournalManager {
@Test
public void testFormat() throws Exception {
- QuorumJournalManager qjm = new QuorumJournalManager(
- conf, cluster.getQuorumJournalURI("testFormat-jid"), FAKE_NSINFO);
+ QuorumJournalManager qjm = closeLater(new QuorumJournalManager(
+ conf, cluster.getQuorumJournalURI("testFormat-jid"), FAKE_NSINFO));
assertFalse(qjm.hasSomeData());
qjm.format(FAKE_NSINFO);
assertTrue(qjm.hasSomeData());
@@ -128,8 +145,7 @@ public class TestQuorumJournalManager {
@Test
public void testReaderWhileAnotherWrites() throws Exception {
-
- QuorumJournalManager readerQjm = createSpyingQJM();
+ QuorumJournalManager readerQjm = closeLater(createSpyingQJM());
List<EditLogInputStream> streams = Lists.newArrayList();
readerQjm.selectInputStreams(streams, 0, false);
assertEquals(0, streams.size());
@@ -251,8 +267,8 @@ public class TestQuorumJournalManager {
// Make a new QJM
- qjm = new QuorumJournalManager(
- conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO);
+ qjm = closeLater(new QuorumJournalManager(
+ conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO));
qjm.recoverUnfinalizedSegments();
checkRecovery(cluster, 1, 3);
@@ -364,8 +380,8 @@ public class TestQuorumJournalManager {
NNStorage.getInProgressEditsFileName(1));
// Make a new QJM
- qjm = new QuorumJournalManager(
- conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO);
+ qjm = closeLater(new QuorumJournalManager(
+ conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO));
qjm.recoverUnfinalizedSegments();
checkRecovery(cluster, 1, 3);
}
@@ -902,8 +918,8 @@ public class TestQuorumJournalManager {
return Mockito.spy(logger);
}
};
- return new QuorumJournalManager(
- conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO, spyFactory);
+ return closeLater(new QuorumJournalManager(
+ conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO, spyFactory));
}
private static void waitForAllPendingCalls(AsyncLoggerSet als)
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Wed Apr 10 20:17:39 2013
@@ -17,10 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
+import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;
@@ -68,6 +70,8 @@ public class TestBPOfferService {
TestBPOfferService.class);
private static final ExtendedBlock FAKE_BLOCK =
new ExtendedBlock(FAKE_BPID, 12345L);
+ private static final String TEST_BUILD_DATA = System.getProperty(
+ "test.build.data", "build/test/data");
static {
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
@@ -90,6 +94,8 @@ public class TestBPOfferService {
mockDn = Mockito.mock(DataNode.class);
Mockito.doReturn(true).when(mockDn).shouldRun();
Configuration conf = new Configuration();
+ File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
+ conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
Mockito.doReturn(conf).when(mockDn).getConf();
Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java Wed Apr 10 20:17:39 2013
@@ -113,7 +113,7 @@ public class TestBlockPoolManager {
// Remove the first NS
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
- "ns1");
+ "ns2");
bpm.refreshNamenodes(conf);
assertEquals(
"stop #1\n" +
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java Wed Apr 10 20:17:39 2013
@@ -32,6 +32,14 @@ public class TestRoundRobinVolumeChoosin
// Test the Round-Robin block-volume choosing algorithm.
@Test
public void testRR() throws Exception {
+ @SuppressWarnings("unchecked")
+ final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy =
+ ReflectionUtils.newInstance(RoundRobinVolumeChoosingPolicy.class, null);
+ testRR(policy);
+ }
+
+ public static void testRR(VolumeChoosingPolicy<FsVolumeSpi> policy)
+ throws Exception {
final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume, with 100 bytes of space.
@@ -41,10 +49,6 @@ public class TestRoundRobinVolumeChoosin
// Second volume, with 200 bytes of space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
-
- @SuppressWarnings("unchecked")
- final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy =
- ReflectionUtils.newInstance(RoundRobinVolumeChoosingPolicy.class, null);
// Test two rounds of round-robin choosing
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
@@ -69,6 +73,13 @@ public class TestRoundRobinVolumeChoosin
// with volume and block sizes in exception message.
@Test
public void testRRPolicyExceptionMessage() throws Exception {
+ final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy
+ = new RoundRobinVolumeChoosingPolicy<FsVolumeSpi>();
+ testRRPolicyExceptionMessage(policy);
+ }
+
+ public static void testRRPolicyExceptionMessage(
+ VolumeChoosingPolicy<FsVolumeSpi> policy) throws Exception {
final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume, with 500 bytes of space.
@@ -79,8 +90,6 @@ public class TestRoundRobinVolumeChoosin
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
- final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy
- = new RoundRobinVolumeChoosingPolicy<FsVolumeSpi>();
int blockSize = 700;
try {
policy.chooseVolume(volumes, blockSize);
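
The refactoring above turns both test bodies into public static helpers
parameterized on VolumeChoosingPolicy, presumably so that other policies can
be run through the same assertions. A hedged sketch of such reuse follows;
the test method is hypothetical, and it assumes that an
AvailableSpaceVolumeChoosingPolicy over balanced volumes degrades to
round-robin behavior.

    @Test
    public void testRRWithAvailableSpacePolicy() throws Exception {
      @SuppressWarnings("unchecked")
      final VolumeChoosingPolicy<FsVolumeSpi> policy =
          ReflectionUtils.newInstance(
              AvailableSpaceVolumeChoosingPolicy.class, null);
      // The mocked volumes in testRR differ by only 100 bytes, well under the
      // default 10 GB balanced-space threshold, so round-robin should apply.
      TestRoundRobinVolumeChoosingPolicy.testRR(policy);
    }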
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Wed Apr 10 20:17:39 2013
@@ -218,7 +218,7 @@ public abstract class FSImageTestUtil {
FsPermission.createImmutable((short)0755));
for (int i = 1; i <= numDirs; i++) {
String dirName = "dir" + i;
- INodeDirectory dir = new INodeDirectory(newInodeId + i -1,
+ INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
DFSUtil.string2Bytes(dirName), perms, 0L);
editLog.logMkDir("/" + dirName, dir);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1466658&r1=1466657&r2=1466658&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Wed Apr 10 20:17:39 2013
@@ -30,8 +30,6 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-import java.lang.management.ThreadMXBean;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
@@ -122,18 +120,7 @@ public class TestCheckpoint {
@After
public void checkForSNNThreads() {
- ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
-
- ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
- for (ThreadInfo info : infos) {
- if (info == null) continue;
- LOG.info("Check thread: " + info.getThreadName());
- if (info.getThreadName().contains("SecondaryNameNode")) {
- fail("Leaked thread: " + info + "\n" +
- Joiner.on("\n").join(info.getStackTrace()));
- }
- }
- LOG.info("--------");
+ GenericTestUtils.assertNoThreadsMatching(".*SecondaryNameNode.*");
}
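
GenericTestUtils.assertNoThreadsMatching generalizes the inlined ThreadMXBean
scan deleted above. A sketch of what such a helper plausibly looks like,
reconstructed from that removed code (an illustration, not the actual
GenericTestUtils source):

    public static void assertNoThreadsMatching(String regex) {
      Pattern pattern = Pattern.compile(regex);
      ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
      ThreadInfo[] infos =
          threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
      for (ThreadInfo info : infos) {
        if (info == null) continue;
        if (pattern.matcher(info.getThreadName()).matches()) {
          Assert.fail("Leaked thread: " + info + "\n"
              + Joiner.on("\n").join(info.getStackTrace()));
        }
      }
    }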
static void checkFile(FileSystem fileSys, Path name, int repl)