You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2013/07/10 19:01:29 UTC
svn commit: r1501851 - in /hadoop/common/trunk/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/...
Author: jing9
Date: Wed Jul 10 17:01:28 2013
New Revision: 1501851
URL: http://svn.apache.org/r1501851
Log:
HDFS-4962. Use enum for nfs constants. Contributed by Tsz Wo (Nicholas) SZE.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1501851&r1=1501850&r2=1501851&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Wed Jul 10 17:01:28 2013
@@ -154,24 +154,25 @@ public class RpcProgramMountd extends Rp
@Override
public XDR handleInternal(RpcCall rpcCall, XDR xdr, XDR out,
InetAddress client, Channel channel) {
- int procedure = rpcCall.getProcedure();
+ final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
int xid = rpcCall.getXid();
- if (procedure == MNTPROC_NULL) {
+ if (mntproc == MNTPROC.NULL) {
out = nullOp(out, xid, client);
- } else if (procedure == MNTPROC_MNT) {
+ } else if (mntproc == MNTPROC.MNT) {
out = mnt(xdr, out, xid, client);
- } else if (procedure == MNTPROC_DUMP) {
+ } else if (mntproc == MNTPROC.DUMP) {
out = dump(out, xid, client);
- } else if (procedure == MNTPROC_UMNT) {
+ } else if (mntproc == MNTPROC.UMNT) {
out = umnt(xdr, out, xid, client);
- } else if (procedure == MNTPROC_UMNTALL) {
+ } else if (mntproc == MNTPROC.UMNTALL) {
umntall(out, xid, client);
- } else if (procedure == MNTPROC_EXPORT) {
+ } else if (mntproc == MNTPROC.EXPORT) {
out = MountResponse.writeExportList(out, xid, exports);
} else {
// Invalid procedure
RpcAcceptedReply.voidReply(out, xid,
- RpcAcceptedReply.AcceptState.PROC_UNAVAIL); }
+ RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
+ }
return out;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1501851&r1=1501850&r2=1501851&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Wed Jul 10 17:01:28 2013
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState;
import org.apache.hadoop.io.BytesWritable.Comparator;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.nfs.nfs3.FileHandle;
@@ -345,7 +346,7 @@ class OpenFileCtx {
+ " nextOffset:" + nextOffset);
WriteCtx writeCtx = new WriteCtx(request.getHandle(),
request.getOffset(), request.getCount(), request.getStableHow(),
- request.getData().array(), channel, xid, false, WriteCtx.NO_DUMP);
+ request.getData().array(), channel, xid, false, DataState.NO_DUMP);
addWrite(writeCtx);
// Create an async task and change openFileCtx status to indicate async
@@ -373,7 +374,7 @@ class OpenFileCtx {
+ nextOffset);
WriteCtx writeCtx = new WriteCtx(request.getHandle(),
request.getOffset(), request.getCount(), request.getStableHow(),
- request.getData().array(), channel, xid, false, WriteCtx.ALLOW_DUMP);
+ request.getData().array(), channel, xid, false, DataState.ALLOW_DUMP);
addWrite(writeCtx);
// Check if need to dump some pending requests to file
@@ -693,7 +694,7 @@ class OpenFileCtx {
nextOffset = fos.getPos();
// Reduce memory occupation size if request was allowed dumped
- if (writeCtx.getDataState() == WriteCtx.ALLOW_DUMP) {
+ if (writeCtx.getDataState() == DataState.ALLOW_DUMP) {
updateNonSequentialWriteInMemory(-count);
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1501851&r1=1501850&r2=1501851&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Wed Jul 10 17:01:28 2013
@@ -46,6 +46,7 @@ import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.IdUserGroup;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant.NFSPROC3;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Interface;
@@ -1530,12 +1531,12 @@ public class RpcProgramNfs3 extends RpcP
@Override
public XDR handleInternal(RpcCall rpcCall, final XDR xdr, XDR out,
InetAddress client, Channel channel) {
- int procedure = rpcCall.getProcedure();
+ final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());
int xid = rpcCall.getXid();
RpcAuthSys authSys = null;
// Ignore auth only for NFSPROC3_NULL, especially for Linux clients.
- if (procedure != Nfs3Constant.NFSPROC3_NULL) {
+ if (nfsproc3 != NFSPROC3.NULL) {
if (rpcCall.getCredential().getFlavor() != AuthFlavor.AUTH_SYS) {
LOG.info("Wrong RPC AUTH flavor, "
+ rpcCall.getCredential().getFlavor() + " is not AUTH_SYS.");
@@ -1549,49 +1550,49 @@ public class RpcProgramNfs3 extends RpcP
}
NFS3Response response = null;
- if (procedure == Nfs3Constant.NFSPROC3_NULL) {
+ if (nfsproc3 == NFSPROC3.NULL) {
response = nullProcedure();
- } else if (procedure == Nfs3Constant.NFSPROC3_GETATTR) {
+ } else if (nfsproc3 == NFSPROC3.GETATTR) {
response = getattr(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_SETATTR) {
+ } else if (nfsproc3 == NFSPROC3.SETATTR) {
response = setattr(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_LOOKUP) {
+ } else if (nfsproc3 == NFSPROC3.LOOKUP) {
response = lookup(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_ACCESS) {
+ } else if (nfsproc3 == NFSPROC3.ACCESS) {
response = access(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_READLINK) {
+ } else if (nfsproc3 == NFSPROC3.READLINK) {
response = readlink(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_READ) {
+ } else if (nfsproc3 == NFSPROC3.READ) {
response = read(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_WRITE) {
+ } else if (nfsproc3 == NFSPROC3.WRITE) {
response = write(xdr, channel, xid, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_CREATE) {
+ } else if (nfsproc3 == NFSPROC3.CREATE) {
response = create(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_MKDIR) {
+ } else if (nfsproc3 == NFSPROC3.MKDIR) {
response = mkdir(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_SYMLINK) {
+ } else if (nfsproc3 == NFSPROC3.SYMLINK) {
response = symlink(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_MKNOD) {
+ } else if (nfsproc3 == NFSPROC3.MKNOD) {
response = mknod(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_REMOVE) {
+ } else if (nfsproc3 == NFSPROC3.REMOVE) {
response = remove(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_RMDIR) {
+ } else if (nfsproc3 == NFSPROC3.RMDIR) {
response = rmdir(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_RENAME) {
+ } else if (nfsproc3 == NFSPROC3.RENAME) {
response = rename(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_LINK) {
+ } else if (nfsproc3 == NFSPROC3.LINK) {
response = link(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_READDIR) {
+ } else if (nfsproc3 == NFSPROC3.READDIR) {
response = readdir(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_READDIRPLUS) {
+ } else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
response = readdirplus(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_FSSTAT) {
+ } else if (nfsproc3 == NFSPROC3.FSSTAT) {
response = fsstat(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_FSINFO) {
+ } else if (nfsproc3 == NFSPROC3.FSINFO) {
response = fsinfo(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_PATHCONF) {
+ } else if (nfsproc3 == NFSPROC3.PATHCONF) {
response = pathconf(xdr, authSys);
- } else if (procedure == Nfs3Constant.NFSPROC3_COMMIT) {
+ } else if (nfsproc3 == NFSPROC3.COMMIT) {
response = commit(xdr, authSys);
} else {
// Invalid procedure
@@ -1606,17 +1607,7 @@ public class RpcProgramNfs3 extends RpcP
@Override
protected boolean isIdempotent(RpcCall call) {
- return isIdempotent(call.getProcedure());
- }
-
- public static boolean isIdempotent(int procedure) {
- return !(procedure == Nfs3Constant.NFSPROC3_CREATE
- || procedure == Nfs3Constant.NFSPROC3_REMOVE
- || procedure == Nfs3Constant.NFSPROC3_MKDIR
- || procedure == Nfs3Constant.NFSPROC3_MKNOD
- || procedure == Nfs3Constant.NFSPROC3_LINK
- || procedure == Nfs3Constant.NFSPROC3_RMDIR
- || procedure == Nfs3Constant.NFSPROC3_SYMLINK
- || procedure == Nfs3Constant.NFSPROC3_RENAME);
+ final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(call.getProcedure());
+ return nfsproc3 == null || nfsproc3.isIdempotent();
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java?rev=1501851&r1=1501850&r2=1501851&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java Wed Jul 10 17:01:28 2013
@@ -33,6 +33,17 @@ import org.jboss.netty.channel.Channel;
*/
class WriteCtx {
public static final Log LOG = LogFactory.getLog(WriteCtx.class);
+
+ /**
+ * In memory write data has 3 states. ALLOW_DUMP: not sequential write, still
+ * wait for prerequisit writes. NO_DUMP: sequential write, no need to dump
+ * since it will be written to HDFS soon. DUMPED: already dumped to a file.
+ */
+ public static enum DataState {
+ ALLOW_DUMP,
+ NO_DUMP,
+ DUMPED;
+ }
private final FileHandle handle;
private final long offset;
@@ -43,22 +54,14 @@ class WriteCtx {
private final Channel channel;
private final int xid;
private boolean replied;
-
- /**
- * In memory write data has 3 states. ALLOW_DUMP: not sequential write, still
- * wait for prerequisite writes. NO_DUMP: sequential write, no need to dump
- * since it will be written to HDFS soon. DUMPED: already dumped to a file.
- */
- public final static int ALLOW_DUMP = 0;
- public final static int NO_DUMP = 1;
- public final static int DUMPED = 2;
- private int dataState;
- public int getDataState() {
+ private DataState dataState;
+
+ public DataState getDataState() {
return dataState;
}
- public void setDataState(int dataState) {
+ public void setDataState(DataState dataState) {
this.dataState = dataState;
}
@@ -68,7 +71,7 @@ class WriteCtx {
// Return the dumped data size
public long dumpData(FileOutputStream dumpOut, RandomAccessFile raf)
throws IOException {
- if (dataState != ALLOW_DUMP) {
+ if (dataState != DataState.ALLOW_DUMP) {
if (LOG.isTraceEnabled()) {
LOG.trace("No need to dump with status(replied,dataState):" + "("
+ replied + "," + dataState + ")");
@@ -82,7 +85,7 @@ class WriteCtx {
LOG.debug("After dump, new dumpFileOffset:" + dumpFileOffset);
}
data = null;
- dataState = DUMPED;
+ dataState = DataState.DUMPED;
return count;
}
@@ -103,7 +106,7 @@ class WriteCtx {
}
public byte[] getData() throws IOException {
- if (dataState != DUMPED) {
+ if (dataState != DataState.DUMPED) {
if (data == null) {
throw new IOException("Data is not dumpted but has null:" + this);
}
@@ -140,7 +143,7 @@ class WriteCtx {
}
WriteCtx(FileHandle handle, long offset, int count, WriteStableHow stableHow,
- byte[] data, Channel channel, int xid, boolean replied, int dataState) {
+ byte[] data, Channel channel, int xid, boolean replied, DataState dataState) {
this.handle = handle;
this.offset = offset;
this.count = count;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java?rev=1501851&r1=1501850&r2=1501851&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java Wed Jul 10 17:01:28 2013
@@ -59,7 +59,7 @@ public class TestOutOfOrderWrite {
static XDR create() {
XDR request = new XDR();
RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM,
- Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3_CREATE);
+ Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3.CREATE.getValue());
// credentials
request.writeInt(0); // auth null
@@ -79,7 +79,7 @@ public class TestOutOfOrderWrite {
byte[] data) {
XDR request = new XDR();
RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
- Nfs3Constant.NFSPROC3_WRITE);
+ Nfs3Constant.NFSPROC3.WRITE.getValue());
// credentials
request.writeInt(0); // auth null
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java?rev=1501851&r1=1501850&r2=1501851&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java Wed Jul 10 17:01:28 2013
@@ -28,38 +28,38 @@ import org.junit.Test;
public class TestRpcProgramNfs3 {
@Test(timeout=1000)
public void testIdempotent() {
- int[][] procedures = {
- { Nfs3Constant.NFSPROC3_NULL, 1 },
- { Nfs3Constant.NFSPROC3_GETATTR, 1 },
- { Nfs3Constant.NFSPROC3_SETATTR, 1 },
- { Nfs3Constant.NFSPROC3_LOOKUP, 1 },
- { Nfs3Constant.NFSPROC3_ACCESS, 1 },
- { Nfs3Constant.NFSPROC3_READLINK, 1 },
- { Nfs3Constant.NFSPROC3_READ, 1 },
- { Nfs3Constant.NFSPROC3_WRITE, 1 },
- { Nfs3Constant.NFSPROC3_CREATE, 0 },
- { Nfs3Constant.NFSPROC3_MKDIR, 0 },
- { Nfs3Constant.NFSPROC3_SYMLINK, 0 },
- { Nfs3Constant.NFSPROC3_MKNOD, 0 },
- { Nfs3Constant.NFSPROC3_REMOVE, 0 },
- { Nfs3Constant.NFSPROC3_RMDIR, 0 },
- { Nfs3Constant.NFSPROC3_RENAME, 0 },
- { Nfs3Constant.NFSPROC3_LINK, 0 },
- { Nfs3Constant.NFSPROC3_READDIR, 1 },
- { Nfs3Constant.NFSPROC3_READDIRPLUS, 1 },
- { Nfs3Constant.NFSPROC3_FSSTAT, 1 },
- { Nfs3Constant.NFSPROC3_FSINFO, 1 },
- { Nfs3Constant.NFSPROC3_PATHCONF, 1 },
- { Nfs3Constant.NFSPROC3_COMMIT, 1 } };
- for (int[] procedure : procedures) {
- boolean idempotent = procedure[1] == 1;
- int proc = procedure[0];
+ Object[][] procedures = {
+ { Nfs3Constant.NFSPROC3.NULL, 1 },
+ { Nfs3Constant.NFSPROC3.GETATTR, 1 },
+ { Nfs3Constant.NFSPROC3.SETATTR, 1 },
+ { Nfs3Constant.NFSPROC3.LOOKUP, 1 },
+ { Nfs3Constant.NFSPROC3.ACCESS, 1 },
+ { Nfs3Constant.NFSPROC3.READLINK, 1 },
+ { Nfs3Constant.NFSPROC3.READ, 1 },
+ { Nfs3Constant.NFSPROC3.WRITE, 1 },
+ { Nfs3Constant.NFSPROC3.CREATE, 0 },
+ { Nfs3Constant.NFSPROC3.MKDIR, 0 },
+ { Nfs3Constant.NFSPROC3.SYMLINK, 0 },
+ { Nfs3Constant.NFSPROC3.MKNOD, 0 },
+ { Nfs3Constant.NFSPROC3.REMOVE, 0 },
+ { Nfs3Constant.NFSPROC3.RMDIR, 0 },
+ { Nfs3Constant.NFSPROC3.RENAME, 0 },
+ { Nfs3Constant.NFSPROC3.LINK, 0 },
+ { Nfs3Constant.NFSPROC3.READDIR, 1 },
+ { Nfs3Constant.NFSPROC3.READDIRPLUS, 1 },
+ { Nfs3Constant.NFSPROC3.FSSTAT, 1 },
+ { Nfs3Constant.NFSPROC3.FSINFO, 1 },
+ { Nfs3Constant.NFSPROC3.PATHCONF, 1 },
+ { Nfs3Constant.NFSPROC3.COMMIT, 1 } };
+ for (Object[] procedure : procedures) {
+ boolean idempotent = procedure[1].equals(Integer.valueOf(1));
+ Nfs3Constant.NFSPROC3 proc = (Nfs3Constant.NFSPROC3)procedure[0];
if (idempotent) {
Assert.assertTrue(("Procedure " + proc + " should be idempotent"),
- RpcProgramNfs3.isIdempotent(proc));
+ proc.isIdempotent());
} else {
Assert.assertFalse(("Procedure " + proc + " should be non-idempotent"),
- RpcProgramNfs3.isIdempotent(proc));
+ proc.isIdempotent());
}
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1501851&r1=1501850&r2=1501851&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jul 10 17:01:28 2013
@@ -14,6 +14,8 @@ Trunk (Unreleased)
HDFS-4762 Provide HDFS based NFSv3 and Mountd implementation (brandonli)
+ HDFS-4962 Use enum for nfs constants (Nicholas SZE via jing9)
+
IMPROVEMENTS
HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.