Posted to hdfs-commits@hadoop.apache.org by vi...@apache.org on 2013/11/10 21:09:14 UTC
svn commit: r1540535 [1/2] - in
/hadoop/common/branches/YARN-321/hadoop-hdfs-project: ./
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/ap...
Author: vinodkv
Date: Sun Nov 10 20:09:09 2013
New Revision: 1540535
URL: http://svn.apache.org/r1540535
Log:
Forwarding YARN-321 branch to latest branch-2.
svn merge ../branch-2
Added:
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
- copied unchanged from r1540532, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
- copied unchanged from r1540532, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
- copied unchanged from r1540532, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
- copied unchanged from r1540532, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
Modified:
hadoop/common/branches/YARN-321/hadoop-hdfs-project/ (props changed)
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java
hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project:r1538156
Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project:r1537327-1540532
Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs:r1537327-1540532
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1538156
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java Sun Nov 10 20:09:09 2013
@@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.nfs.mount;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mount.MountdBase;
@@ -32,23 +30,14 @@ import org.apache.hadoop.mount.MountdBas
* handle for requested directory and returns it to the client.
*/
public class Mountd extends MountdBase {
- /**
- * Constructor
- * @param exports
- * @throws IOException
- */
- public Mountd(List<String> exports) throws IOException {
- super(exports, new RpcProgramMountd(exports));
- }
- public Mountd(List<String> exports, Configuration config) throws IOException {
- super(exports, new RpcProgramMountd(exports, config));
+ public Mountd(Configuration config) throws IOException {
+ super(new RpcProgramMountd(config));
}
public static void main(String[] args) throws IOException {
- List<String> exports = new ArrayList<String>();
- exports.add("/");
- Mountd mountd = new Mountd(exports);
+ Configuration config = new Configuration();
+ Mountd mountd = new Mountd(config);
mountd.start(true);
}
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Sun Nov 10 20:09:09 2013
@@ -36,6 +36,7 @@ import org.apache.hadoop.mount.MountResp
import org.apache.hadoop.nfs.AccessPrivilege;
import org.apache.hadoop.nfs.NfsExports;
import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.RpcCall;
@@ -49,6 +50,8 @@ import org.jboss.netty.buffer.ChannelBuf
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* RPC program corresponding to mountd daemon. See {@link Mountd}.
*/
@@ -71,23 +74,15 @@ public class RpcProgramMountd extends Rp
private final NfsExports hostsMatcher;
- public RpcProgramMountd() throws IOException {
- this(new ArrayList<String>(0));
- }
-
- public RpcProgramMountd(List<String> exports) throws IOException {
- this(exports, new Configuration());
- }
-
- public RpcProgramMountd(List<String> exports, Configuration config)
- throws IOException {
+ public RpcProgramMountd(Configuration config) throws IOException {
// Note that RPC cache is not enabled
super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
PROGRAM, VERSION_1, VERSION_3);
-
+ exports = new ArrayList<String>();
+ exports.add(config.get(Nfs3Constant.EXPORT_POINT,
+ Nfs3Constant.EXPORT_POINT_DEFAULT));
this.hostsMatcher = NfsExports.getInstance(config);
this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
- this.exports = Collections.unmodifiableList(exports);
this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
@@ -200,7 +195,7 @@ public class RpcProgramMountd extends Rp
} else if (mntproc == MNTPROC.UMNTALL) {
umntall(out, xid, client);
} else if (mntproc == MNTPROC.EXPORT) {
- // Currently only support one NFS export "/"
+ // Currently only support one NFS export
List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
hostsMatchers.add(hostsMatcher);
out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
@@ -220,4 +215,9 @@ public class RpcProgramMountd extends Rp
// Not required, because cache is turned off
return false;
}
+
+ @VisibleForTesting
+ public List<String> getExports() {
+ return this.exports;
+ }
}
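
For illustration (not part of this commit): a minimal sketch of how the reworked constructor picks up the export point from configuration. It assumes a running NameNode reachable through the default configuration, since the constructor opens a DFSClient (as the new TestExportsTable exercises); the class name and "/export" value are just examples.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
    import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

    public class ExportPointSketch {
      public static List<String> exportsFor(String dir) throws IOException {
        Configuration config = new Configuration();
        // The single export point now comes from configuration instead of
        // a caller-supplied List<String>.
        config.set(Nfs3Constant.EXPORT_POINT, dir);
        RpcProgramMountd mountd = new RpcProgramMountd(config);
        return mountd.getExports(); // accessor added above for testing
      }
    }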
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java Sun Nov 10 20:09:09 2013
@@ -46,6 +46,7 @@ public class AsyncDataService {
public AsyncDataService() {
threadFactory = new ThreadFactory() {
+ @Override
public Thread newThread(Runnable r) {
return new Thread(threadGroup, r);
}
@@ -129,6 +130,7 @@ public class AsyncDataService {
+ openFileCtx.getNextOffset();
}
+ @Override
public void run() {
try {
openFileCtx.executeWriteBack();
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java Sun Nov 10 20:09:09 2013
@@ -118,6 +118,7 @@ class DFSClientCache {
// Guava requires CacheLoader never returns null.
return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
+ @Override
public DFSClient run() throws IOException {
return new DFSClient(NameNode.getAddress(config), config);
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java Sun Nov 10 20:09:09 2013
@@ -18,38 +18,45 @@
package org.apache.hadoop.hdfs.nfs.nfs3;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.nfs.mount.Mountd;
import org.apache.hadoop.nfs.nfs3.Nfs3Base;
import org.apache.hadoop.util.StringUtils;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* Nfs server. Supports NFS v3 using {@link RpcProgramNfs3}.
* Currently the Mountd program is also started inside this class.
* Only the TCP server is supported; UDP is not.
*/
public class Nfs3 extends Nfs3Base {
+ private Mountd mountd;
+
static {
Configuration.addDefaultResource("hdfs-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
}
- public Nfs3(List<String> exports) throws IOException {
- super(new Mountd(exports), new RpcProgramNfs3());
+ public Nfs3(Configuration conf) throws IOException {
+ super(new RpcProgramNfs3(conf), conf);
+ mountd = new Mountd(conf);
}
- public Nfs3(List<String> exports, Configuration config) throws IOException {
- super(new Mountd(exports, config), new RpcProgramNfs3(config), config);
+ public Mountd getMountd() {
+ return mountd;
}
-
+
+ @VisibleForTesting
+ public void startServiceInternal(boolean register) throws IOException {
+ mountd.start(register); // Start mountd
+ start(register);
+ }
+
public static void main(String[] args) throws IOException {
- StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
- List<String> exports = new ArrayList<String>();
- exports.add("/");
- final Nfs3 nfsServer = new Nfs3(exports);
- nfsServer.start(true);
+ StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
+ final Nfs3 nfsServer = new Nfs3(new Configuration());
+ nfsServer.startServiceInternal(true);
}
}
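
For illustration (not part of this commit): since Nfs3 now owns its Mountd, the two programs are started together through startServiceInternal; a short sketch of the embedded startup that the updated tests below rely on (the class name is hypothetical).

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;

    public class EmbeddedNfs3Sketch {
      public static Nfs3 startForTest(Configuration config) throws IOException {
        Nfs3 nfs3 = new Nfs3(config);
        // false: start mountd and the NFS3 program without registering
        // with the system portmapper, the way the tests bring the server up.
        nfs3.startServiceInternal(false);
        return nfs3;
      }
    }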
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java Sun Nov 10 20:09:09 2013
@@ -109,6 +109,12 @@ public class Nfs3Utils {
* Send a write response to the netty network socket channel
*/
public static void writeChannel(Channel channel, XDR out, int xid) {
+ if (channel == null) {
+ RpcProgramNfs3.LOG
+ .info("Null channel should only happen in tests. Do nothing.");
+ return;
+ }
+
if (RpcProgramNfs3.LOG.isDebugEnabled()) {
RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid);
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Sun Nov 10 20:09:09 2013
@@ -24,7 +24,6 @@ import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
-import java.security.InvalidParameterException;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.Map.Entry;
@@ -96,7 +95,7 @@ class OpenFileCtx {
// It's updated after each sync to HDFS
private Nfs3FileAttributes latestAttr;
-
+
private final ConcurrentNavigableMap<OffsetRange, WriteCtx> pendingWrites;
private final ConcurrentNavigableMap<Long, CommitCtx> pendingCommits;
@@ -165,10 +164,22 @@ class OpenFileCtx {
return System.currentTimeMillis() - lastAccessTime > streamTimeout;
}
+ long getLastAccessTime() {
+ return lastAccessTime;
+ }
+
public long getNextOffset() {
return nextOffset.get();
}
+ boolean getActiveState() {
+ return this.activeState;
+ }
+
+ boolean hasPendingWork() {
+ return (pendingWrites.size() != 0 || pendingCommits.size() != 0);
+ }
+
// Increase or decrease the memory occupation of non-sequential writes
private long updateNonSequentialWriteInMemory(long count) {
long newValue = nonSequentialWriteInMemory.addAndGet(count);
@@ -800,19 +811,18 @@ class OpenFileCtx {
* @return true, remove stream; false, keep stream
*/
public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
- if (streamTimeout < WriteManager.MINIMIUM_STREAM_TIMEOUT) {
- throw new InvalidParameterException("StreamTimeout" + streamTimeout
- + "ms is less than MINIMIUM_STREAM_TIMEOUT "
- + WriteManager.MINIMIUM_STREAM_TIMEOUT + "ms");
+ Preconditions
+ .checkState(streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
+ if (!activeState) {
+ return true;
}
boolean flag = false;
// Check the stream timeout
if (checkStreamTimeout(streamTimeout)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("closing stream for fileId:" + fileId);
+ LOG.debug("stream can be closed for fileId:" + fileId);
}
- cleanup();
flag = true;
}
return flag;
@@ -985,7 +995,7 @@ class OpenFileCtx {
FileHandle handle = writeCtx.getHandle();
if (LOG.isDebugEnabled()) {
LOG.debug("do write, fileId: " + handle.getFileId() + " offset: "
- + offset + " length:" + count + " stableHow:" + stableHow.getValue());
+ + offset + " length:" + count + " stableHow:" + stableHow.name());
}
try {
@@ -1017,6 +1027,23 @@ class OpenFileCtx {
}
if (!writeCtx.getReplied()) {
+ if (stableHow != WriteStableHow.UNSTABLE) {
+ LOG.info("Do sync for stable write:" + writeCtx);
+ try {
+ if (stableHow == WriteStableHow.DATA_SYNC) {
+ fos.hsync();
+ } else {
+ Preconditions.checkState(stableHow == WriteStableHow.FILE_SYNC,
+ "Unknown WriteStableHow:" + stableHow);
+ // Sync file data and length
+ fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+ }
+ } catch (IOException e) {
+ LOG.error("hsync failed with writeCtx:" + writeCtx + " error:" + e);
+ throw e;
+ }
+ }
+
WccAttr preOpAttr = latestAttr.getWccAttr();
WccData fileWcc = new WccData(preOpAttr, latestAttr);
if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
@@ -1049,7 +1076,7 @@ class OpenFileCtx {
}
}
- private synchronized void cleanup() {
+ synchronized void cleanup() {
if (!activeState) {
LOG.info("Current OpenFileCtx is already inactive, no need to cleanup.");
return;
@@ -1057,7 +1084,7 @@ class OpenFileCtx {
activeState = false;
// stop the dump thread
- if (dumpThread != null) {
+ if (dumpThread != null && dumpThread.isAlive()) {
dumpThread.interrupt();
try {
dumpThread.join(3000);
@@ -1139,4 +1166,10 @@ class OpenFileCtx {
void setActiveStatusForTest(boolean activeState) {
this.activeState = activeState;
}
+
+ @Override
+ public String toString() {
+ return String.format("activeState: %b asyncStatus: %b nextOffset: %d",
+ activeState, asyncStatus, nextOffset.get());
+ }
}
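
For illustration (not part of this commit): the stable-write hunk above maps the NFS WriteStableHow onto HDFS sync calls, i.e. DATA_SYNC gets a plain hsync() while FILE_SYNC additionally persists the visible file length. A standalone sketch of that mapping, using the same client types as the diff:

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
    import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;

    public class StableWriteSyncSketch {
      // Sync a stable write before replying to the NFS client.
      public static void syncForStableWrite(HdfsDataOutputStream fos,
          WriteStableHow stableHow) throws IOException {
        if (stableHow == WriteStableHow.UNSTABLE) {
          return; // unstable writes are acknowledged without syncing
        }
        if (stableHow == WriteStableHow.DATA_SYNC) {
          fos.hsync(); // flush data to the datanodes
        } else { // FILE_SYNC: also sync the file length on the NameNode
          fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        }
      }
    }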
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Sun Nov 10 20:09:09 2013
@@ -126,6 +126,8 @@ import org.jboss.netty.buffer.ChannelBuf
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* RPC program corresponding to nfs daemon. See {@link Nfs3}.
*/
@@ -161,12 +163,9 @@ public class RpcProgramNfs3 extends RpcP
private final RpcCallCache rpcCallCache;
- public RpcProgramNfs3() throws IOException {
- this(new Configuration());
- }
-
public RpcProgramNfs3(Configuration config) throws IOException {
- super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
+ super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
+ Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
Nfs3Constant.VERSION, Nfs3Constant.VERSION);
config.set(FsPermission.UMASK_LABEL, "000");
@@ -212,6 +211,11 @@ public class RpcProgramNfs3 extends RpcP
}
}
+ @Override
+ public void startDaemons() {
+ writeManager.startAsyncDataSerivce();
+ }
+
/******************************************************
* RPC call handlers
******************************************************/
@@ -776,7 +780,8 @@ public class RpcProgramNfs3 extends RpcP
int createMode = request.getMode();
if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE)
- && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
+ && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
+ && request.getObjAttr().getSize() != 0) {
LOG.error("Setting file size is not supported when creating file: "
+ fileName + " dir fileId:" + dirHandle.getFileId());
return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -829,6 +834,23 @@ public class RpcProgramNfs3 extends RpcP
postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
dfsClient, dirFileIdPath, iug);
+
+ // Add open stream
+ OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr,
+ writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug);
+ fileHandle = new FileHandle(postOpObjAttr.getFileId());
+ if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) {
+ LOG.warn("Can't add more stream, close it."
+ + " Future write will become append");
+ fos.close();
+ fos = null;
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Opened stream for file:" + fileName + ", fileId:"
+ + fileHandle.getFileId());
+ }
+ }
+
} catch (IOException e) {
LOG.error("Exception", e);
if (fos != null) {
@@ -857,16 +879,6 @@ public class RpcProgramNfs3 extends RpcP
}
}
- // Add open stream
- OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr, writeDumpDir
- + "/" + postOpObjAttr.getFileId(), dfsClient, iug);
- fileHandle = new FileHandle(postOpObjAttr.getFileId());
- writeManager.addOpenFileStream(fileHandle, openFileCtx);
- if (LOG.isDebugEnabled()) {
- LOG.debug("open stream for file:" + fileName + ", fileId:"
- + fileHandle.getFileId());
- }
-
return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
dirWcc);
}
@@ -1975,4 +1987,9 @@ public class RpcProgramNfs3 extends RpcP
}
return true;
}
+
+ @VisibleForTesting
+ WriteManager getWriteManager() {
+ return this.writeManager;
+ }
}
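
For illustration (not part of this commit): with the server port now read from configuration, tests can request ephemeral ports; the key strings below appear verbatim in the test changes further down (the class name is hypothetical).

    import org.apache.hadoop.conf.Configuration;

    public class EphemeralPortSketch {
      public static Configuration ephemeralPorts() {
        Configuration config = new Configuration();
        // Port 0 lets the OS pick a free port, so parallel test runs
        // do not collide on the fixed defaults.
        config.setInt("nfs3.mountd.port", 0);
        config.setInt("nfs3.server.port", 0);
        return config;
      }
    }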
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java Sun Nov 10 20:09:09 2013
@@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.nfs.nfs3;
import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map.Entry;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
@@ -29,11 +27,12 @@ import org.apache.hadoop.fs.CommonConfig
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.IdUserGroup;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
@@ -45,6 +44,7 @@ import org.apache.hadoop.oncrpc.security
import org.apache.hadoop.util.Daemon;
import org.jboss.netty.channel.Channel;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
/**
@@ -55,69 +55,70 @@ public class WriteManager {
private final Configuration config;
private final IdUserGroup iug;
- private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps
- .newConcurrentMap();
-
+
private AsyncDataService asyncDataService;
private boolean asyncDataServiceStarted = false;
- private final StreamMonitor streamMonitor;
-
+ private final int maxStreams;
+
/**
* The time limit to wait to accumulate reordered sequential writes to the
* same file before the write is considered done.
*/
private long streamTimeout;
-
- public static final long DEFAULT_STREAM_TIMEOUT = 10 * 60 * 1000; //10 minutes
- public static final long MINIMIUM_STREAM_TIMEOUT = 10 * 1000; //10 seconds
-
- void addOpenFileStream(FileHandle h, OpenFileCtx ctx) {
- openFileMap.put(h, ctx);
- if (LOG.isDebugEnabled()) {
- LOG.debug("After add the new stream " + h.getFileId()
- + ", the stream number:" + openFileMap.size());
+
+ private final OpenFileCtxCache fileContextCache;
+
+ static public class MultipleCachedStreamException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ public MultipleCachedStreamException(String msg) {
+ super(msg);
}
}
+ boolean addOpenFileStream(FileHandle h, OpenFileCtx ctx) {
+ return fileContextCache.put(h, ctx);
+ }
+
WriteManager(IdUserGroup iug, final Configuration config) {
this.iug = iug;
this.config = config;
-
- streamTimeout = config.getLong("dfs.nfs3.stream.timeout",
- DEFAULT_STREAM_TIMEOUT);
+ streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
+ Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
LOG.info("Stream timeout is " + streamTimeout + "ms.");
- if (streamTimeout < MINIMIUM_STREAM_TIMEOUT) {
+ if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
LOG.info("Reset stream timeout to minimum value "
- + MINIMIUM_STREAM_TIMEOUT + "ms.");
- streamTimeout = MINIMIUM_STREAM_TIMEOUT;
+ + Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
+ streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
}
-
- this.streamMonitor = new StreamMonitor();
+ maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
+ Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
+ LOG.info("Maximum open streams is "+ maxStreams);
+ this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
- private void startAsyncDataSerivce() {
- streamMonitor.start();
+ void startAsyncDataSerivce() {
+ if (asyncDataServiceStarted) {
+ return;
+ }
+ fileContextCache.start();
this.asyncDataService = new AsyncDataService();
asyncDataServiceStarted = true;
}
- private void shutdownAsyncDataService() {
- asyncDataService.shutdown();
+ void shutdownAsyncDataService() {
+ if (!asyncDataServiceStarted) {
+ return;
+ }
asyncDataServiceStarted = false;
- streamMonitor.interrupt();
+ asyncDataService.shutdown();
+ fileContextCache.shutdown();
}
void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
int xid, Nfs3FileAttributes preOpAttr) throws IOException {
- // First write request starts the async data service
- if (!asyncDataServiceStarted) {
- startAsyncDataSerivce();
- }
-
- long offset = request.getOffset();
int count = request.getCount();
- WriteStableHow stableHow = request.getStableHow();
byte[] data = request.getData().array();
if (data.length < count) {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -128,13 +129,12 @@ public class WriteManager {
FileHandle handle = request.getHandle();
if (LOG.isDebugEnabled()) {
- LOG.debug("handleWrite fileId: " + handle.getFileId() + " offset: "
- + offset + " length:" + count + " stableHow:" + stableHow.getValue());
+ LOG.debug("handleWrite " + request);
}
// Check if there is a stream to write
FileHandle fileHandle = request.getHandle();
- OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
+ OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
if (openFileCtx == null) {
LOG.info("No opened stream for fileId:" + fileHandle.getFileId());
@@ -149,6 +149,15 @@ public class WriteManager {
fos = dfsClient.append(fileIdPath, bufferSize, null, null);
latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
+ } catch (RemoteException e) {
+ IOException io = e.unwrapRemoteException();
+ if (io instanceof AlreadyBeingCreatedException) {
+ LOG.warn("Can't append file:" + fileIdPath
+ + ". Possibly the file is being closed. Drop the request:"
+ + request + ", wait for the client to retry...");
+ return;
+ }
+ throw e;
} catch (IOException e) {
LOG.error("Can't apapend to file:" + fileIdPath + ", error:" + e);
if (fos != null) {
@@ -169,9 +178,26 @@ public class WriteManager {
Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
+ fileHandle.getFileId(), dfsClient, iug);
- addOpenFileStream(fileHandle, openFileCtx);
+
+ if (!addOpenFileStream(fileHandle, openFileCtx)) {
+ LOG.info("Can't add new stream. Close it. Tell client to retry.");
+ try {
+ fos.close();
+ } catch (IOException e) {
+ LOG.error("Can't close stream for fileId:" + handle.getFileId());
+ }
+ // Notify client to retry
+ WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
+ WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX,
+ fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
+ Nfs3Utils.writeChannel(channel,
+ response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
+ xid);
+ return;
+ }
+
if (LOG.isDebugEnabled()) {
- LOG.debug("opened stream for file:" + fileHandle.getFileId());
+ LOG.debug("Opened stream for appending file:" + fileHandle.getFileId());
}
}
@@ -184,7 +210,7 @@ public class WriteManager {
void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
int status;
- OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
+ OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
if (openFileCtx == null) {
LOG.info("No opened stream for fileId:" + fileHandle.getFileId()
@@ -237,7 +263,7 @@ public class WriteManager {
String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
if (attr != null) {
- OpenFileCtx openFileCtx = openFileMap.get(fileHandle);
+ OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
if (openFileCtx != null) {
attr.setSize(openFileCtx.getNextOffset());
attr.setUsed(openFileCtx.getNextOffset());
@@ -252,8 +278,8 @@ public class WriteManager {
Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) {
- OpenFileCtx openFileCtx = openFileMap
- .get(new FileHandle(attr.getFileId()));
+ OpenFileCtx openFileCtx = fileContextCache.get(new FileHandle(attr
+ .getFileId()));
if (openFileCtx != null) {
attr.setSize(openFileCtx.getNextOffset());
@@ -262,51 +288,9 @@ public class WriteManager {
}
return attr;
}
-
- /**
- * StreamMonitor wakes up periodically to find and closes idle streams.
- */
- class StreamMonitor extends Daemon {
- private int rotation = 5 * 1000; // 5 seconds
- private long lastWakeupTime = 0;
-
- @Override
- public void run() {
- while (true) {
- Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
- .iterator();
- if (LOG.isTraceEnabled()) {
- LOG.trace("openFileMap size:" + openFileMap.size());
- }
- while (it.hasNext()) {
- Entry<FileHandle, OpenFileCtx> pairs = it.next();
- OpenFileCtx ctx = pairs.getValue();
- if (ctx.streamCleanup((pairs.getKey()).getFileId(), streamTimeout)) {
- it.remove();
- if (LOG.isDebugEnabled()) {
- LOG.debug("After remove stream " + pairs.getKey().getFileId()
- + ", the stream number:" + openFileMap.size());
- }
- }
- }
-
- // Check if it can sleep
- try {
- long workedTime = System.currentTimeMillis() - lastWakeupTime;
- if (workedTime < rotation) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("StreamMonitor can still have a sleep:"
- + ((rotation - workedTime) / 1000));
- }
- Thread.sleep(rotation - workedTime);
- }
- lastWakeupTime = System.currentTimeMillis();
- } catch (InterruptedException e) {
- LOG.info("StreamMonitor got interrupted");
- return;
- }
- }
- }
+ @VisibleForTesting
+ OpenFileCtxCache getOpenFileCtxCache() {
+ return this.fileContextCache;
}
}
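
For illustration (not part of this commit): addOpenFileStream now delegates to OpenFileCtxCache.put and can fail once MAX_OPEN_FILES streams are cached; the caller then closes the stream (and, in handleWrite, replies NFS3ERR_JUKEBOX so the client retries). A condensed sketch of that pattern; the StreamCache interface is a hypothetical stand-in for the package-private OpenFileCtxCache.

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.nfs.nfs3.FileHandle;

    public class StreamCacheSketch {
      private static final Log LOG = LogFactory.getLog(StreamCacheSketch.class);

      // Hypothetical stand-in for the package-private OpenFileCtxCache.
      public interface StreamCache {
        boolean put(FileHandle handle, Object openFileCtx);
      }

      // Register a new open stream; on a full cache, close the stream so
      // the client is told to retry (NFS3ERR_JUKEBOX in the real code).
      public static boolean register(StreamCache cache, FileHandle handle,
          Object ctx, HdfsDataOutputStream fos) throws IOException {
        if (!cache.put(handle, ctx)) {
          LOG.info("Can't add new stream. Close it. Tell client to retry.");
          fos.close();
          return false;
        }
        return true;
      }
    }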
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java Sun Nov 10 20:09:09 2013
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.nfs;
import java.io.IOException;
import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -45,13 +43,15 @@ public class TestMountd {
.build();
cluster.waitActive();
+ // Use ephemeral port in case tests are running in parallel
+ config.setInt("nfs3.mountd.port", 0);
+ config.setInt("nfs3.server.port", 0);
+
// Start nfs
- List<String> exports = new ArrayList<String>();
- exports.add("/");
- Nfs3 nfs3 = new Nfs3(exports, config);
- nfs3.start(false);
+ Nfs3 nfs3 = new Nfs3(config);
+ nfs3.startServiceInternal(false);
- RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
+ RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
.getRpcProgram();
mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java Sun Nov 10 20:09:09 2013
@@ -23,6 +23,7 @@ import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
@@ -135,6 +136,7 @@ public class TestOutOfOrderWrite {
@Override
protected ChannelPipelineFactory setPipelineFactory() {
this.pipelineFactory = new ChannelPipelineFactory() {
+ @Override
public ChannelPipeline getPipeline() {
return Channels.pipeline(
RpcUtil.constructRpcFrameDecoder(),
@@ -153,7 +155,9 @@ public class TestOutOfOrderWrite {
Arrays.fill(data3, (byte) 9);
// NFS3 Create request
- WriteClient client = new WriteClient("localhost", Nfs3Constant.PORT,
+ Configuration conf = new Configuration();
+ WriteClient client = new WriteClient("localhost", conf.getInt(
+ Nfs3Constant.NFS3_SERVER_PORT, Nfs3Constant.NFS3_SERVER_PORT_DEFAULT),
create(), false);
client.run();
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java Sun Nov 10 20:09:09 2013
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertTru
import java.io.IOException;
import java.net.InetAddress;
-import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
@@ -67,11 +66,13 @@ public class TestReaddir {
hdfs = cluster.getFileSystem();
nn = cluster.getNameNode();
+ // Use ephemeral port in case tests are running in parallel
+ config.setInt("nfs3.mountd.port", 0);
+ config.setInt("nfs3.server.port", 0);
+
// Start nfs
- List<String> exports = new ArrayList<String>();
- exports.add("/");
- Nfs3 nfs3 = new Nfs3(exports, config);
- nfs3.start(false);
+ Nfs3 nfs3 = new Nfs3(config);
+ nfs3.startServiceInternal(false);
nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java Sun Nov 10 20:09:09 2013
@@ -17,21 +17,38 @@
*/
package org.apache.hadoop.hdfs.nfs.nfs3;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
+import java.net.InetAddress;
import java.nio.ByteBuffer;
+import java.util.Arrays;
import java.util.concurrent.ConcurrentNavigableMap;
import junit.framework.Assert;
import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.IdUserGroup;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
+import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
+import org.apache.hadoop.nfs.nfs3.request.READ3Request;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
+import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
+import org.apache.hadoop.nfs.nfs3.response.READ3Response;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.SecurityHandler;
import org.junit.Test;
import org.mockito.Mockito;
@@ -105,7 +122,7 @@ public class TestWrites {
Assert.assertTrue(limit - position == 1);
Assert.assertTrue(appendedData.get(position) == (byte) 19);
}
-
+
@Test
// Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which
// includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
@@ -162,4 +179,118 @@ public class TestWrites {
ret = ctx.checkCommit(dfsClient, 0, null, 1, attr);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
}
+
+ private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime)
+ throws InterruptedException {
+ int waitedTime = 0;
+ OpenFileCtx ctx = nfsd.getWriteManager()
+ .getOpenFileCtxCache().get(handle);
+ assertTrue(ctx != null);
+ do {
+ Thread.sleep(3000);
+ waitedTime += 3000;
+ if (ctx.getPendingWritesForTest().size() == 0) {
+ return;
+ }
+ } while (waitedTime < maxWaitTime);
+
+ fail("Write can't finish.");
+ }
+
+ @Test
+ public void testWriteStableHow() throws IOException, InterruptedException {
+ HdfsConfiguration config = new HdfsConfiguration();
+ DFSClient client = null;
+ MiniDFSCluster cluster = null;
+ RpcProgramNfs3 nfsd;
+ SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
+ Mockito.when(securityHandler.getUser()).thenReturn(
+ System.getProperty("user.name"));
+
+ try {
+ cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+ cluster.waitActive();
+ client = new DFSClient(NameNode.getAddress(config), config);
+
+ // Use ephemeral port in case tests are running in parallel
+ config.setInt("nfs3.mountd.port", 0);
+ config.setInt("nfs3.server.port", 0);
+
+ // Start nfs
+ Nfs3 nfs3 = new Nfs3(config);
+ nfs3.startServiceInternal(false);
+ nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
+
+ HdfsFileStatus status = client.getFileInfo("/");
+ FileHandle rootHandle = new FileHandle(status.getFileId());
+ // Create file1
+ CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
+ Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
+ XDR createXdr = new XDR();
+ createReq.serialize(createXdr);
+ CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
+ securityHandler, InetAddress.getLocalHost());
+ FileHandle handle = createRsp.getObjHandle();
+
+ // Test DATA_SYNC
+ byte[] buffer = new byte[10];
+ for (int i = 0; i < 10; i++) {
+ buffer[i] = (byte) i;
+ }
+ WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
+ WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
+ XDR writeXdr = new XDR();
+ writeReq.serialize(writeXdr);
+ nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
+ InetAddress.getLocalHost());
+
+ waitWrite(nfsd, handle, 60000);
+
+ // Readback
+ READ3Request readReq = new READ3Request(handle, 0, 10);
+ XDR readXdr = new XDR();
+ readReq.serialize(readXdr);
+ READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
+ securityHandler, InetAddress.getLocalHost());
+
+ assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
+
+ // Test FILE_SYNC
+
+ // Create file2
+ CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
+ Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
+ XDR createXdr2 = new XDR();
+ createReq2.serialize(createXdr2);
+ CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
+ securityHandler, InetAddress.getLocalHost());
+ FileHandle handle2 = createRsp2.getObjHandle();
+
+ WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
+ WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
+ XDR writeXdr2 = new XDR();
+ writeReq2.serialize(writeXdr2);
+ nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
+ InetAddress.getLocalHost());
+
+ waitWrite(nfsd, handle2, 60000);
+
+ // Readback
+ READ3Request readReq2 = new READ3Request(handle2, 0, 10);
+ XDR readXdr2 = new XDR();
+ readReq2.serialize(readXdr2);
+ READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
+ securityHandler, InetAddress.getLocalHost());
+
+ assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
+ // FILE_SYNC should sync the file size
+ status = client.getFileInfo("/file2");
+ assertTrue(status.getLen() == 10);
+
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Nov 10 20:09:09 2013
@@ -96,6 +96,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5436. Move HsFtpFileSystem and HFtpFileSystem into org.apache.hdfs.web
(Haohui Mai via Arpit Agarwal)
+ HDFS-5371. Let client retry the same NN when
+ "dfs.client.test.drop.namenode.response.number" is enabled. (jing9)
+
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -107,9 +110,6 @@ Release 2.3.0 - UNRELEASED
HDFS-5034. Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin
Patrick McCabe)
- HDFS-5035. getFileLinkStatus and rename do not correctly check permissions
- of symlinks. (Andrew Wang via Colin Patrick McCabe)
-
HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image
transfer. (Andrew Wang)
@@ -140,6 +140,17 @@ Release 2.3.0 - UNRELEASED
HDFS-5257. addBlock() retry should return LocatedBlock with locations else client
will get AIOBE. (Vinay via jing9)
+ HDFS-5427. Not able to read deleted files from snapshot directly under
+ snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
+
+ HDFS-5443. Delete 0-sized block when deleting an under-construction file that
+ is included in snapshot. (jing9)
+
+ HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed
+ file/directory while deletion. (jing9)
+
+ HDFS-5325. Remove WebHdfsFileSystem#ConnRunner. (Haohui Mai via jing9)
+
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -158,6 +169,8 @@ Release 2.2.1 - UNRELEASED
report to a configurable value. (Aaron T. Myers via Colin Patrick
McCabe)
+ HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
+
OPTIMIZATIONS
BUG FIXES
@@ -214,6 +227,27 @@ Release 2.2.1 - UNRELEASED
HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
+ HDFS-4633. TestDFSClientExcludedNodes fails sporadically if excluded nodes
+ cache expires too quickly (Chris Nauroth via Sanjay)
+
+ HDFS-5037. Active NN should trigger its own edit log rolls. (wang)
+
+ HDFS-5035. getFileLinkStatus and rename do not correctly check permissions
+ of symlinks. (Andrew Wang via Colin Patrick McCabe)
+
+ HDFS-5456. NameNode startup progress creates new steps if caller attempts to
+ create a counter for a step that doesn't already exist. (cnauroth)
+
+ HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
+ in getDataDirsFromURIs. (Mike Mellenthin via wang)
+
+ HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
+
+ HDFS-5364. Add OpenFileCtx cache. (brandonli)
+
+ HDFS-5469. Add configuration property for the sub-directory export path
+ (brandonli)
+
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
Propchange: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1537327-1540532
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1538156
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Sun Nov 10 20:09:09 2013
@@ -182,6 +182,11 @@ public class DFSConfigKeys extends Commo
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
+ public static final String DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD = "dfs.namenode.edit.log.autoroll.multiplier.threshold";
+ public static final float DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT = 2.0f;
+ public static final String DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS = "dfs.namenode.edit.log.autoroll.check.interval.ms";
+ public static final int DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT = 5*60*1000;
+
public static final String DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH = "dfs.namenode.edits.noeditlogchannelflush";
public static final boolean DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT = false;
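
For illustration (not part of this commit): the two new autoroll keys can be set programmatically as below; the key strings and values are taken verbatim from the hunk above, and the comment on the multiplier reflects HDFS-5037's behavior of rolling once the log exceeds that multiple of the checkpoint transaction count (the class name is hypothetical).

    import org.apache.hadoop.conf.Configuration;

    public class EditLogAutorollSketch {
      public static Configuration withDefaults() {
        Configuration conf = new Configuration();
        // Roll the active NN's edit log once it grows past this multiple
        // of the checkpoint transaction count (HDFS-5037).
        conf.setFloat("dfs.namenode.edit.log.autoroll.multiplier.threshold", 2.0f);
        // How often (ms) the NN checks whether a roll is needed.
        conf.setInt("dfs.namenode.edit.log.autoroll.check.interval.ms", 5 * 60 * 1000);
        return conf;
      }
    }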
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java Sun Nov 10 20:09:09 2013
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -62,48 +63,83 @@ public class HdfsConfiguration extends C
public static void init() {
}
- private static void deprecate(String oldKey, String newKey) {
- Configuration.addDeprecation(oldKey, newKey);
- }
-
private static void addDeprecatedKeys() {
- deprecate("dfs.backup.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY);
- deprecate("dfs.backup.http.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY);
- deprecate("dfs.balance.bandwidthPerSec", DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY);
- deprecate("dfs.data.dir", DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
- deprecate("dfs.http.address", DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
- deprecate("dfs.https.address", DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
- deprecate("dfs.max.objects", DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY);
- deprecate("dfs.name.dir", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
- deprecate("dfs.name.dir.restore", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY);
- deprecate("dfs.name.edits.dir", DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
- deprecate("dfs.read.prefetch.size", DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY);
- deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
- deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
- deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
- deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
- deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
- deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
- deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
- deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
- deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
- deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);
- deprecate("slave.host.name", DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
- deprecate("session.id", DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
- deprecate("dfs.access.time.precision", DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY);
- deprecate("dfs.replication.considerLoad", DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY);
- deprecate("dfs.replication.interval", DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY);
- deprecate("dfs.replication.min", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY);
- deprecate("dfs.replication.pending.timeout.sec", DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY);
- deprecate("dfs.max-repl-streams", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY);
- deprecate("dfs.permissions", DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY);
- deprecate("dfs.permissions.supergroup", DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
- deprecate("dfs.write.packet.size", DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY);
- deprecate("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY);
- deprecate("dfs.datanode.max.xcievers", DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY);
- deprecate("io.bytes.per.checksum", DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY);
- deprecate("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES);
- deprecate("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID);
+ Configuration.addDeprecations(new DeprecationDelta[] {
+ new DeprecationDelta("dfs.backup.address",
+ DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY),
+ new DeprecationDelta("dfs.backup.http.address",
+ DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY),
+ new DeprecationDelta("dfs.balance.bandwidthPerSec",
+ DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY),
+ new DeprecationDelta("dfs.data.dir",
+ DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),
+ new DeprecationDelta("dfs.http.address",
+ DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY),
+ new DeprecationDelta("dfs.https.address",
+ DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY),
+ new DeprecationDelta("dfs.max.objects",
+ DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY),
+ new DeprecationDelta("dfs.name.dir",
+ DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),
+ new DeprecationDelta("dfs.name.dir.restore",
+ DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY),
+ new DeprecationDelta("dfs.name.edits.dir",
+ DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY),
+ new DeprecationDelta("dfs.read.prefetch.size",
+ DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY),
+ new DeprecationDelta("dfs.safemode.extension",
+ DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY),
+ new DeprecationDelta("dfs.safemode.threshold.pct",
+ DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY),
+ new DeprecationDelta("dfs.secondary.http.address",
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY),
+ new DeprecationDelta("dfs.socket.timeout",
+ DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
+ new DeprecationDelta("fs.checkpoint.dir",
+ DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY),
+ new DeprecationDelta("fs.checkpoint.edits.dir",
+ DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY),
+ new DeprecationDelta("fs.checkpoint.period",
+ DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY),
+ new DeprecationDelta("heartbeat.recheck.interval",
+ DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY),
+ new DeprecationDelta("dfs.https.client.keystore.resource",
+ DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY),
+ new DeprecationDelta("dfs.https.need.client.auth",
+ DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY),
+ new DeprecationDelta("slave.host.name",
+ DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY),
+ new DeprecationDelta("session.id",
+ DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+ new DeprecationDelta("dfs.access.time.precision",
+ DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY),
+ new DeprecationDelta("dfs.replication.considerLoad",
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY),
+ new DeprecationDelta("dfs.replication.interval",
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY),
+ new DeprecationDelta("dfs.replication.min",
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY),
+ new DeprecationDelta("dfs.replication.pending.timeout.sec",
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY),
+ new DeprecationDelta("dfs.max-repl-streams",
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY),
+ new DeprecationDelta("dfs.permissions",
+ DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY),
+ new DeprecationDelta("dfs.permissions.supergroup",
+ DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
+ new DeprecationDelta("dfs.write.packet.size",
+ DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY),
+ new DeprecationDelta("dfs.block.size",
+ DFSConfigKeys.DFS_BLOCK_SIZE_KEY),
+ new DeprecationDelta("dfs.datanode.max.xcievers",
+ DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY),
+ new DeprecationDelta("io.bytes.per.checksum",
+ DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY),
+ new DeprecationDelta("dfs.federation.nameservices",
+ DFSConfigKeys.DFS_NAMESERVICES),
+ new DeprecationDelta("dfs.federation.nameservice.id",
+ DFSConfigKeys.DFS_NAMESERVICE_ID)
+ });
}
public static void main(String[] args) {
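[Note on the HdfsConfiguration hunk above: it swaps dozens of individual addDeprecation calls for one batched Configuration.addDeprecations call, presumably so the copy-on-write deprecation context is rebuilt once per batch rather than once per key. A minimal sketch of the same pattern, assuming the Hadoop 2.x Configuration API; the key names here are hypothetical:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configuration.DeprecationDelta;

    public class DeprecationDemo {
      static {
        // One batched registration; each DeprecationDelta maps an old
        // key to its replacement.
        Configuration.addDeprecations(new DeprecationDelta[] {
            new DeprecationDelta("my.old.key", "my.new.key"),
            new DeprecationDelta("my.other.old.key", "my.other.new.key")
        });
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("my.old.key", "42");
        // Reads resolve through the deprecation mapping to the new key.
        System.out.println(conf.get("my.new.key"));  // 42
      }
    }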
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java Sun Nov 10 20:09:09 2013
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.bl
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.Iterator;
import java.util.List;
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sun Nov 10 20:09:09 2013
@@ -1752,7 +1752,7 @@ public class DataNode extends Configured
} catch (IOException ioe) {
LOG.warn("Invalid " + DFS_DATANODE_DATA_DIR_KEY + " "
+ dir + " : ", ioe);
- invalidDirs.append("\"").append(dir.getCanonicalPath()).append("\" ");
+ invalidDirs.append("\"").append(dirURI.getPath()).append("\" ");
}
}
if (dirs.size() == 0) {
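[Note on the one-line DataNode change above: File#getCanonicalPath() does real filesystem work (resolving symlinks and relative segments) and can itself throw IOException, which is awkward inside a catch block that is already reporting a bad directory, while URI#getPath() is a pure accessor. A small sketch of the difference, with a hypothetical path:]

    import java.io.File;
    import java.io.IOException;
    import java.net.URI;

    public class InvalidDirReport {
      public static void main(String[] args) throws Exception {
        URI dirURI = new URI("file:///data/dn1");  // hypothetical volume
        StringBuilder invalidDirs = new StringBuilder();
        // Safe inside an error path: no further I/O can fail here.
        invalidDirs.append("\"").append(dirURI.getPath()).append("\" ");
        System.out.println("Invalid dirs: " + invalidDirs);

        try {
          // By contrast, canonicalization touches the filesystem and
          // may throw for an unreadable or malformed path.
          System.out.println(new File(dirURI.getPath()).getCanonicalPath());
        } catch (IOException ioe) {
          System.out.println("canonicalization failed: " + ioe.getMessage());
        }
      }
    }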
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Sun Nov 10 20:09:09 2013
@@ -586,8 +586,12 @@ public class FSImageFormat {
namesystem.dir.cacheName(child);
if (child.isFile()) {
+ updateBlocksMap(child.asFile());
+ }
+ }
+
+ public void updateBlocksMap(INodeFile file) {
// Add file->block mapping
- final INodeFile file = child.asFile();
final BlockInfo[] blocks = file.getBlocks();
if (blocks != null) {
final BlockManager bm = namesystem.getBlockManager();
@@ -596,7 +600,6 @@ public class FSImageFormat {
}
}
}
- }
/** @return The FSDirectory of the namesystem where the fsimage is loaded */
public FSDirectory getFSDirectoryInLoading() {
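[Note on the FSImageFormat hunk above: the inline file->block registration becomes a public updateBlocksMap(INodeFile), presumably so other image-loading paths can reuse it; the loop body itself is elided by the diff context. A toy, self-contained sketch of the shape that loop takes, where addBlockCollection stands in for the BlockManager call and interning a canonical instance is an assumption about the elided code:]

    import java.util.HashMap;
    import java.util.Map;

    public class BlocksMapSketch {
      // Toy stand-in for the blocks map (names hypothetical).
      static Map<String, String> blocksMap = new HashMap<String, String>();

      // Register a block, handing back the canonical stored instance.
      static String addBlockCollection(String block) {
        String canonical = blocksMap.get(block);
        if (canonical == null) {
          blocksMap.put(block, block);
          canonical = block;
        }
        return canonical;
      }

      // Same shape as updateBlocksMap: walk the file's blocks and swap
      // each for the canonical instance the manager returns.
      static void updateBlocksMap(String[] fileBlocks) {
        for (int i = 0; i < fileBlocks.length; i++) {
          fileBlocks[i] = addBlockCollection(fileBlocks[i]);
        }
      }

      public static void main(String[] args) {
        String[] blocks = { new String("blk_1"), new String("blk_2") };
        updateBlocksMap(blocks);
        System.out.println(blocksMap.size());  // 2
      }
    }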
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sun Nov 10 20:09:09 2013
@@ -38,6 +38,8 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY;
@@ -49,6 +51,10 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
@@ -375,6 +381,16 @@ public class FSNamesystem implements Nam
Daemon nnrmthread = null; // NamenodeResourceMonitor thread
+ Daemon nnEditLogRoller = null; // NameNodeEditLogRoller thread
+ /**
+ * Threshold, in number of edits, at which an active namenode will roll its own edit log
+ */
+ private final long editLogRollerThreshold;
+ /**
+ * Check interval of an active namenode's edit log roller thread
+ */
+ private final int editLogRollerInterval;
+
private volatile boolean hasResourcesAvailable = false;
private volatile boolean fsRunning = true;
@@ -688,7 +704,17 @@ public class FSNamesystem implements Nam
this.standbyShouldCheckpoint = conf.getBoolean(
DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
-
+ // The autoroll threshold, in edits, is a multiple of the checkpoint txn threshold
+ this.editLogRollerThreshold = (long)
+ (conf.getFloat(
+ DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD,
+ DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT) *
+ conf.getLong(
+ DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
+ DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT));
+ this.editLogRollerInterval = conf.getInt(
+ DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS,
+ DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT);
this.inodeId = new INodeId();
// For testing purposes, allow the DT secret manager to be started regardless
@@ -956,6 +982,11 @@ public class FSNamesystem implements Nam
//ResourceMonitor required only at ActiveNN. See HDFS-2914
this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
nnrmthread.start();
+
+ nnEditLogRoller = new Daemon(new NameNodeEditLogRoller(
+ editLogRollerThreshold, editLogRollerInterval));
+ nnEditLogRoller.start();
+
} finally {
writeUnlock();
startingActiveService = false;
@@ -993,6 +1024,10 @@ public class FSNamesystem implements Nam
((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
nnrmthread.interrupt();
}
+ if (nnEditLogRoller != null) {
+ ((NameNodeEditLogRoller)nnEditLogRoller.getRunnable()).stop();
+ nnEditLogRoller.interrupt();
+ }
if (dir != null && dir.fsImage != null) {
if (dir.fsImage.editLog != null) {
dir.fsImage.editLog.close();
@@ -4124,7 +4159,48 @@ public class FSNamesystem implements Nam
shouldNNRmRun = false;
}
}
-
+
+ class NameNodeEditLogRoller implements Runnable {
+
+ private boolean shouldRun = true;
+ private final long rollThreshold;
+ private final long sleepIntervalMs;
+
+ public NameNodeEditLogRoller(long rollThreshold, int sleepIntervalMs) {
+ this.rollThreshold = rollThreshold;
+ this.sleepIntervalMs = sleepIntervalMs;
+ }
+
+ @Override
+ public void run() {
+ while (fsRunning && shouldRun) {
+ try {
+ FSEditLog editLog = getFSImage().getEditLog();
+ long numEdits =
+ editLog.getLastWrittenTxId() - editLog.getCurSegmentTxId();
+ if (numEdits > rollThreshold) {
+ FSNamesystem.LOG.info("NameNode rolling its own edit log because"
+ + " number of edits in open segment exceeds threshold of "
+ + rollThreshold);
+ rollEditLog();
+ }
+ Thread.sleep(sleepIntervalMs);
+ } catch (InterruptedException e) {
+ FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName()
+ + " was interrupted, exiting");
+ break;
+ } catch (Exception e) {
+ FSNamesystem.LOG.error("Swallowing exception in "
+ + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
+ }
+ }
+ }
+
+ public void stop() {
+ shouldRun = false;
+ }
+ }
+
public FSImage getFSImage() {
return dir.fsImage;
}
@@ -5141,7 +5217,9 @@ public class FSNamesystem implements Nam
try {
checkOperation(OperationCategory.JOURNAL);
checkNameNodeSafeMode("Log not rolled");
- LOG.info("Roll Edit Log from " + Server.getRemoteAddress());
+ if (Server.isRpcInvocation()) {
+ LOG.info("Roll Edit Log from " + Server.getRemoteAddress());
+ }
return getFSImage().rollEditLog();
} finally {
writeUnlock();
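[Note on the FSNamesystem hunks above: the new NameNodeEditLogRoller daemon wakes on a fixed interval and rolls the edit log once the open segment holds more edits than the checkpoint transaction count scaled by a float multiplier. A hedged arithmetic sketch of one pass, assuming the branch-2 defaults (2.0 multiplier, 1,000,000 checkpoint txns, 5-minute check interval) and hypothetical txids:]

    public class AutorollThreshold {
      public static void main(String[] args) {
        // Assumed branch-2 defaults; the real values come from
        // DFSConfigKeys and may differ in a given deployment.
        float multiplier = 2.0f;         // autoroll multiplier threshold
        long checkpointTxns = 1000000L;  // dfs.namenode.checkpoint.txns
        long rollThreshold = (long) (multiplier * checkpointTxns);

        // Hypothetical txids for one pass of NameNodeEditLogRoller.run().
        long curSegmentTxId = 5000000L;   // first txid of the open segment
        long lastWrittenTxId = 7000123L;  // latest txid written
        long numEdits = lastWrittenTxId - curSegmentTxId;

        if (numEdits > rollThreshold) {
          // 2000123 > 2000000, so this pass would call rollEditLog().
          System.out.println("rolling: " + numEdits + " > " + rollThreshold);
        }
      }
    }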
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Sun Nov 10 20:09:09 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
+import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -131,6 +133,39 @@ public class INodeFileUnderConstruction
}
@Override
+ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
+ throws QuotaExceededException {
+ if (snapshot == null && prior != null) {
+ cleanZeroSizeBlock(collectedBlocks);
+ return Counts.newInstance();
+ } else {
+ return super.cleanSubtree(snapshot, prior, collectedBlocks,
+ removedINodes, countDiffChange);
+ }
+ }
+
+ /**
+ * When deleting a file in the current fs directory, and the file is contained
+ * in a snapshot, we should delete the last block if it's under construction
+ * and its size is 0.
+ */
+ private void cleanZeroSizeBlock(final BlocksMapUpdateInfo collectedBlocks) {
+ final BlockInfo[] blocks = getBlocks();
+ if (blocks != null && blocks.length > 0
+ && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
+ BlockInfoUnderConstruction lastUC =
+ (BlockInfoUnderConstruction) blocks[blocks.length - 1];
+ if (lastUC.getNumBytes() == 0) {
+ // this is a 0-sized block; no need to check its UC state here
+ collectedBlocks.addDeleteBlock(lastUC);
+ removeLastBlock(lastUC);
+ }
+ }
+ }
+
+ @Override
public INodeFileUnderConstruction recordModification(final Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
@@ -157,7 +192,7 @@ public class INodeFileUnderConstruction
* Remove a block from the block list. This block should be
* the last one on the list.
*/
- boolean removeLastBlock(Block oldblock) throws IOException {
+ boolean removeLastBlock(Block oldblock) {
final BlockInfo[] blocks = getBlocks();
if (blocks == null || blocks.length == 0) {
return false;
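[Note on the INodeFileUnderConstruction hunk above: the new cleanSubtree override special-cases exactly one combination, deleting the current file (snapshot == null) while a prior snapshot still references it (prior != null); every other combination falls through to super.cleanSubtree. A small dispatch sketch of that branch, with hypothetical stand-in types:]

    public class CleanSubtreeDispatch {
      // Mirrors the branch added above (names hypothetical).
      static String dispatch(Object snapshot, Object prior) {
        if (snapshot == null && prior != null) {
          // Current file deleted but still captured by a snapshot:
          // only trim a 0-byte under-construction last block.
          return "cleanZeroSizeBlock";
        }
        return "super.cleanSubtree";
      }

      public static void main(String[] args) {
        System.out.println(dispatch(null, new Object()));  // cleanZeroSizeBlock
        System.out.println(dispatch(null, null));          // super.cleanSubtree
        System.out.println(dispatch(new Object(), null));  // super.cleanSubtree
      }
    }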
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java Sun Nov 10 20:09:09 2013
@@ -646,16 +646,14 @@ public abstract class INodeReference ext
FileWithSnapshot sfile = (FileWithSnapshot) referred;
// make sure we mark the file as deleted
sfile.deleteCurrentFile();
- if (snapshot != null) {
- try {
- // when calling cleanSubtree of the referred node, since we
- // compute quota usage updates before calling this destroy
- // function, we use true for countDiffChange
- referred.cleanSubtree(snapshot, prior, collectedBlocks,
- removedINodes, true);
- } catch (QuotaExceededException e) {
- LOG.error("should not exceed quota while snapshot deletion", e);
- }
+ try {
+ // when calling cleanSubtree of the referred node, since we
+ // compute quota usage updates before calling this destroy
+ // function, we use true for countDiffChange
+ referred.cleanSubtree(snapshot, prior, collectedBlocks,
+ removedINodes, true);
+ } catch (QuotaExceededException e) {
+ LOG.error("should not exceed quota while snapshot deletion", e);
}
} else if (referred instanceof INodeDirectoryWithSnapshot) {
// similarly, if referred is a directory, it must be an
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ActiveState.java Sun Nov 10 20:09:09 2013
@@ -38,7 +38,7 @@ public class ActiveState extends HAState
@Override
public void checkOperation(HAContext context, OperationCategory op) {
- return; // Other than journal all operations are allowed in active state
+ return; // All operations are allowed in active state
}
@Override
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Sun Nov 10 20:09:09 2013
@@ -716,14 +716,8 @@ public class INodeDirectoryWithSnapshot
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
- priorCreated = new HashMap<INode, INode>(cList.size());
- for (INode cNode : cList) {
- priorCreated.put(cNode, cNode);
- }
- priorDeleted = new HashMap<INode, INode>(dList.size());
- for (INode dNode : dList) {
- priorDeleted.put(dNode, dNode);
- }
+ priorCreated = cloneDiffList(cList);
+ priorDeleted = cloneDiffList(dList);
}
}
@@ -896,6 +890,17 @@ public class INodeDirectoryWithSnapshot
counts.add(Content.DIRECTORY, diffs.asList().size());
}
+ private static Map<INode, INode> cloneDiffList(List<INode> diffList) {
+ if (diffList == null || diffList.size() == 0) {
+ return null;
+ }
+ Map<INode, INode> map = new HashMap<INode, INode>(diffList.size());
+ for (INode node : diffList) {
+ map.put(node, node);
+ }
+ return map;
+ }
+
/**
* Destroy a subtree under a DstReference node.
*/
@@ -914,26 +919,28 @@ public class INodeDirectoryWithSnapshot
destroyDstSubtree(inode.asReference().getReferredINode(), snapshot,
prior, collectedBlocks, removedINodes);
}
- } else if (inode.isFile() && snapshot != null) {
+ } else if (inode.isFile()) {
inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
} else if (inode.isDirectory()) {
Map<INode, INode> excludedNodes = null;
if (inode instanceof INodeDirectoryWithSnapshot) {
INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) inode;
+
DirectoryDiffList diffList = sdir.getDiffs();
+ DirectoryDiff priorDiff = diffList.getDiff(prior);
+ if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+ List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+ excludedNodes = cloneDiffList(dList);
+ }
+
if (snapshot != null) {
diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks,
removedINodes, true);
}
- DirectoryDiff priorDiff = diffList.getDiff(prior);
+ priorDiff = diffList.getDiff(prior);
if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
priorDiff.diff.destroyCreatedList(sdir, collectedBlocks,
removedINodes);
- List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
- excludedNodes = new HashMap<INode, INode>(dList.size());
- for (INode dNode : dList) {
- excludedNodes.put(dNode, dNode);
- }
}
}
for (INode child : inode.asDirectory().getChildrenList(prior)) {
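[Note on the INodeDirectoryWithSnapshot hunks above: the two hand-rolled loops are collapsed into cloneDiffList, which materializes a created/deleted diff list as an identity-keyed map for constant-time exclusion checks and returns null for an empty list so callers keep a cheap no-exclusions path. A generic sketch of the same helper, assuming nothing HDFS-specific:]

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class CloneDiffListSketch {
      // Same shape as the new helper: null for an empty input so
      // callers can use "map == null" as the fast path.
      static <T> Map<T, T> cloneDiffList(List<T> diffList) {
        if (diffList == null || diffList.size() == 0) {
          return null;
        }
        Map<T, T> map = new HashMap<T, T>(diffList.size());
        for (T node : diffList) {
          map.put(node, node);
        }
        return map;
      }

      public static void main(String[] args) {
        Map<String, String> deleted = cloneDiffList(Arrays.asList("a", "b"));
        // Membership check replaces a linear scan of the deleted list.
        System.out.println(deleted.containsKey("a"));  // true
      }
    }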
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java Sun Nov 10 20:09:09 2013
@@ -109,8 +109,10 @@ public class INodeFileUnderConstructionW
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
if (snapshot == null) { // delete the current file
- recordModification(prior, null);
- isCurrentFileDeleted = true;
+ if (!isCurrentFileDeleted()) {
+ recordModification(prior, null);
+ deleteCurrentFile();
+ }
Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
return Quota.Counts.newInstance();
} else { // delete a snapshot
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1540535&r1=1540534&r2=1540535&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Sun Nov 10 20:09:09 2013
@@ -96,8 +96,10 @@ public class INodeFileWithSnapshot exten
final List<INode> removedINodes, final boolean countDiffChange)
throws QuotaExceededException {
if (snapshot == null) { // delete the current file
- recordModification(prior, null);
- isCurrentFileDeleted = true;
+ if (!isCurrentFileDeleted()) {
+ recordModification(prior, null);
+ deleteCurrentFile();
+ }
Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
return Quota.Counts.newInstance();
} else { // delete a snapshot
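[Note on the last two hunks: both snapshot file classes receive the same guard, so recordModification and deleteCurrentFile run only if the current file has not already been marked deleted, making a second cleanSubtree pass (for example via destroyDstSubtree in the INodeDirectoryWithSnapshot change above) harmless. A minimal sketch of the idempotence pattern, with hypothetical names:]

    public class IdempotentDeleteSketch {
      private boolean currentFileDeleted = false;

      boolean isCurrentFileDeleted() { return currentFileDeleted; }
      void deleteCurrentFile() { currentFileDeleted = true; }

      void cleanCurrentFile() {
        // Record the modification only on the first delete; the
        // double-delete path the hunks guard against is a no-op here.
        if (!isCurrentFileDeleted()) {
          recordModification();
          deleteCurrentFile();
        }
        collectBlocks();  // still safe to run twice
      }

      void recordModification() { System.out.println("recorded once"); }
      void collectBlocks() { System.out.println("blocks collected"); }

      public static void main(String[] args) {
        IdempotentDeleteSketch f = new IdempotentDeleteSketch();
        f.cleanCurrentFile();
        f.cleanCurrentFile();  // prints "blocks collected" only
      }
    }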