Posted to hdfs-commits@hadoop.apache.org by br...@apache.org on 2013/07/02 19:31:59 UTC

svn commit: r1499029 [3/3] - in /hadoop/common/trunk/hadoop-hdfs-project: ./ hadoop-hdfs-nfs/ hadoop-hdfs-nfs/dev-support/ hadoop-hdfs-nfs/src/ hadoop-hdfs-nfs/src/main/ hadoop-hdfs-nfs/src/main/java/ hadoop-hdfs-nfs/src/main/java/org/ hadoop-hdfs-nfs/...

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java?rev=1499029&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java Tue Jul  2 17:31:58 2013
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.nfs;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
+import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
+import org.apache.hadoop.oncrpc.RegistrationClient;
+import org.apache.hadoop.oncrpc.RpcCall;
+import org.apache.hadoop.oncrpc.RpcFrameDecoder;
+import org.apache.hadoop.oncrpc.RpcReply;
+import org.apache.hadoop.oncrpc.SimpleTcpClient;
+import org.apache.hadoop.oncrpc.SimpleTcpClientHandler;
+import org.apache.hadoop.oncrpc.XDR;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.MessageEvent;
+
+public class TestOutOfOrderWrite {
+  public static final Log LOG = LogFactory.getLog(TestOutOfOrderWrite.class);
+
+  static volatile FileHandle handle = null; // set by WriteHandler, polled in main()
+  static volatile Channel channel;
+
+  static byte[] data1 = new byte[1000];
+  static byte[] data2 = new byte[1000];
+  static byte[] data3 = new byte[1000];
+
+  static XDR create() {
+    XDR request = new XDR();
+    RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM,
+        Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3_CREATE);
+
+    // credentials
+    request.writeInt(0); // auth null
+    request.writeInt(0); // length zero
+    // verifier
+    request.writeInt(0); // auth null
+    request.writeInt(0); // length zero
+
+    SetAttr3 objAttr = new SetAttr3();
+    CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
+        "out-of-order-write" + System.currentTimeMillis(), 0, objAttr, 0);
+    createReq.serialize(request);
+    return request;
+  }
+
+  static XDR write(FileHandle handle, int xid, long offset, int count,
+      byte[] data) {
+    XDR request = new XDR();
+    RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
+        Nfs3Constant.NFSPROC3_WRITE);
+
+    // credentials
+    request.writeInt(0); // auth null
+    request.writeInt(0); // length zero
+    // verifier
+    request.writeInt(0); // auth null
+    request.writeInt(0); // length zero
+    WRITE3Request write1 = new WRITE3Request(handle, offset, count,
+        WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
+    write1.serialize(request);
+    return request;
+  }
+
+  static void testRequest(XDR request) {
+    RegistrationClient registrationClient = new RegistrationClient("localhost",
+        Nfs3Constant.SUN_RPCBIND, request);
+    registrationClient.run();
+  }
+
+  static class WriteHandler extends SimpleTcpClientHandler {
+
+    public WriteHandler(XDR request) {
+      super(request);
+    }
+
+    @Override
+    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+      // Get handle from create response
+      ChannelBuffer buf = (ChannelBuffer) e.getMessage();
+      XDR rsp = new XDR(buf.array());
+      if (rsp.getBytes().length == 0) {
+        LOG.info("Response length is zero; ignoring");
+        return;
+      }
+      LOG.info("Response length = " + rsp.getBytes().length);
+
+      RpcReply reply = RpcReply.read(rsp);
+      int xid = reply.getXid();
+      // Only process the create response
+      if (xid != 0x8000004c) {
+        return;
+      }
+      int status = rsp.readInt();
+      if (status != Nfs3Status.NFS3_OK) {
+        LOG.error("Create failed, status = " + status);
+        return;
+      }
+      LOG.info("Create succeeded");
+      rsp.readBoolean(); // post_op_fh3: handle_follows flag
+      handle = new FileHandle();
+      handle.deserialize(rsp);
+      channel = e.getChannel();
+    }
+  }
+
+  static class WriteClient extends SimpleTcpClient {
+
+    public WriteClient(String host, int port, XDR request, boolean oneShot) {
+      super(host, port, request, oneShot);
+    }
+
+    @Override
+    protected ChannelPipelineFactory setPipelineFactory() {
+      this.pipelineFactory = new ChannelPipelineFactory() {
+        public ChannelPipeline getPipeline() {
+          return Channels.pipeline(new RpcFrameDecoder(), new WriteHandler(
+              request));
+        }
+      };
+      return this.pipelineFactory;
+    }
+
+  }
+
+  public static void main(String[] args) throws InterruptedException {
+
+    Arrays.fill(data1, (byte) 7);
+    Arrays.fill(data2, (byte) 8);
+    Arrays.fill(data3, (byte) 9);
+
+    // NFS3 Create request
+    WriteClient client = new WriteClient("localhost", Nfs3Constant.PORT,
+        create(), false);
+    client.run();
+
+    while (handle == null) {
+      Thread.sleep(1000);
+      LOG.info("handle is still null...");
+    }
+    LOG.info("Send write1 request");
+
+    XDR writeReq;
+
+    writeReq = write(handle, 0x8000005c, 2000, 1000, data3);
+    Nfs3Utils.writeChannel(channel, writeReq);
+    writeReq = write(handle, 0x8000005d, 1000, 1000, data2);
+    Nfs3Utils.writeChannel(channel, writeReq);
+    writeReq = write(handle, 0x8000005e, 0, 1000, data1);
+    Nfs3Utils.writeChannel(channel, writeReq);
+
+    // TODO: convert to a JUnit test and validate the result automatically
+  }
+}
\ No newline at end of file
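
A note on the TODO above: validation is still manual. One possible automated
check (a sketch only, not part of this commit; it assumes the gateway is
backed by an HDFS instance reachable through the default Configuration, and
that the file name generated in create() is captured rather than recomputed)
reads the file back and verifies the byte pattern the three writes should
leave behind:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class VerifyOutOfOrderWrite {
      // After the UNSTABLE writes at offsets 2000, 1000 and 0 are reassembled,
      // bytes [0,1000) should be 7, [1000,2000) should be 8, [2000,3000) 9.
      static void verify(String fileName) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        byte[] buf = new byte[3000];
        FSDataInputStream in = fs.open(new Path("/", fileName));
        try {
          in.readFully(0, buf); // positioned read of the whole 3000-byte file
        } finally {
          in.close();
        }
        for (int i = 0; i < buf.length; i++) {
          byte expected = (byte) (7 + i / 1000);
          if (buf[i] != expected) {
            throw new AssertionError("Mismatch at offset " + i + ": expected "
                + expected + " but got " + buf[i]);
          }
        }
      }
    }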

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java?rev=1499029&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java Tue Jul  2 17:31:58 2013
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.nfs;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.oncrpc.RegistrationClient;
+import org.apache.hadoop.oncrpc.RpcCall;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.portmap.PortmapMapping;
+import org.apache.hadoop.portmap.PortmapRequest;
+
+public class TestPortmapRegister {
+  
+  public static final Log LOG = LogFactory.getLog(TestPortmapRegister.class);
+  
+  static void testRequest(XDR request, XDR request2) {
+    RegistrationClient registrationClient = new RegistrationClient(
+        "localhost", Nfs3Constant.SUN_RPCBIND, request);
+    registrationClient.run();
+  }
+ 
+  public static void main(String[] args) throws InterruptedException {
+    PortmapMapping mapEntry = new PortmapMapping(RpcProgramMountd.PROGRAM,
+        RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
+        RpcProgramMountd.PORT);
+    XDR mappingRequest = PortmapRequest.create(mapEntry);
+    RegistrationClient registrationClient = new RegistrationClient(
+        "localhost", Nfs3Constant.SUN_RPCBIND, mappingRequest);
+    registrationClient.run();
+        
+    Thread t1 = new Runtest1();
+    //Thread t2 = new Runtest2();
+    t1.start();
+    //t2.start();
+    t1.join();
+    //t2.join();
+    //testDump();
+  }
+  
+  static class Runtest1 extends Thread {
+    @Override
+    public void run() {
+      //testGetportMount();
+      PortmapMapping mapEntry = new PortmapMapping(RpcProgramMountd.PROGRAM,
+          RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
+          RpcProgramMountd.PORT);
+      XDR req = PortmapRequest.create(mapEntry);
+      testRequest(req, req);
+    }
+  }
+  
+  static class Runtest2 extends Thread {
+    @Override
+    public void run() {
+      testDump();
+    }
+  }
+  
+  static void createPortmapXDRheader(XDR xdr_out, int procedure) {
+    // TODO: Move this to RpcRequest
+    RpcCall.write(xdr_out, 0, 100000, 2, procedure);
+    xdr_out.writeInt(0); // credential flavor: AUTH_NONE
+    xdr_out.writeInt(0); // credential length: 0
+    xdr_out.writeInt(0); // verifier flavor: AUTH_NONE
+    xdr_out.writeInt(0); // verifier length: 0
+
+    /*
+    xdr_out.putInt(1); //unix auth
+    xdr_out.putVariableOpaque(new byte[20]);
+    xdr_out.putInt(0);
+    xdr_out.putInt(0);
+    */
+  }
+ 
+  static void testGetportMount() {
+    XDR xdr_out = new XDR();
+
+    createPortmapXDRheader(xdr_out, 3); // 3 = PMAPPROC_GETPORT
+
+    xdr_out.writeInt(100005); // program: mountd
+    xdr_out.writeInt(1); // version 1
+    xdr_out.writeInt(6); // protocol: 6 = TCP
+    xdr_out.writeInt(0); // port: ignored in a GETPORT call
+
+    XDR request2 = new XDR();
+
+    createPortmapXDRheader(request2, 3);
+    request2.writeInt(100005);
+    request2.writeInt(1);
+    request2.writeInt(6);
+    request2.writeInt(0);
+
+    testRequest(xdr_out, request2);
+  }
+  
+  static void testGetport() {
+    XDR xdr_out = new XDR();
+
+    createPortmapXDRheader(xdr_out, 3); // 3 = PMAPPROC_GETPORT
+
+    xdr_out.writeInt(100003); // program: nfs
+    xdr_out.writeInt(3); // version 3
+    xdr_out.writeInt(6); // protocol: 6 = TCP
+    xdr_out.writeInt(0); // port: ignored in a GETPORT call
+
+    XDR request2 = new XDR();
+
+    createPortmapXDRheader(request2, 3);
+    request2.writeInt(100003);
+    request2.writeInt(3);
+    request2.writeInt(6);
+    request2.writeInt(0);
+
+    testRequest(xdr_out, request2);
+  }
+  
+  static void testDump() {
+    XDR xdr_out = new XDR();
+    createPortmapXDRheader(xdr_out, 4); // 4 = PMAPPROC_DUMP
+    testRequest(xdr_out, xdr_out);
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java?rev=1499029&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java Tue Jul  2 17:31:58 2013
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.nfs;
+
+import java.io.IOException;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.oncrpc.RpcCall;
+import org.apache.hadoop.oncrpc.XDR;
+
+// TODO: convert this to a JUnit test
+public class TestUdpServer {
+  static void testRequest(XDR request, XDR request2) {
+    try {
+      DatagramSocket clientSocket = new DatagramSocket();
+      InetAddress ipAddress = InetAddress.getByName("localhost");
+      byte[] sendData = request.getBytes();
+      byte[] receiveData = new byte[65535];
+
+      DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
+          ipAddress, Nfs3Constant.SUN_RPCBIND);
+      clientSocket.send(sendPacket);
+      DatagramPacket receivePacket = new DatagramPacket(receiveData,
+          receiveData.length);
+      clientSocket.receive(receivePacket);
+      clientSocket.close();
+
+    } catch (UnknownHostException e) {
+      System.err.println("Don't know about host: localhost.");
+      System.exit(1);
+    } catch (IOException e) {
+      System.err.println("Couldn't get I/O for "
+          + "the connection to: localhost.");
+      System.exit(1);
+    }
+  }
+ 
+  public static void main(String[] args) throws InterruptedException {
+    Thread t1 = new Runtest1();
+    // TODO: cleanup
+    //Thread t2 = new Runtest2();
+    t1.start();
+    //t2.start();
+    t1.join();
+    //t2.join();
+    //testDump();
+  }
+  
+  static class Runtest1 extends Thread {
+    @Override
+    public void run() {
+      testGetportMount();
+    }
+  }
+  
+  static class Runtest2 extends Thread {
+    @Override
+    public void run() {
+      testDump();
+    }
+  }
+  
+  static void createPortmapXDRheader(XDR xdr_out, int procedure) {
+    // TODO: move this into a shared RPC request builder
+    RpcCall.write(xdr_out, 0, 100000, 2, procedure);
+    xdr_out.writeInt(0); // credential flavor: AUTH_NONE
+    xdr_out.writeInt(0); // credential length: 0
+    xdr_out.writeInt(0); // verifier flavor: AUTH_NONE
+    xdr_out.writeInt(0); // verifier length: 0
+  }
+ 
+  static void testGetportMount() {
+    XDR xdr_out = new XDR();
+    createPortmapXDRheader(xdr_out, 3); // 3 = PMAPPROC_GETPORT
+    xdr_out.writeInt(100005); // program: mountd
+    xdr_out.writeInt(1); // version 1
+    xdr_out.writeInt(6); // protocol: 6 = TCP
+    xdr_out.writeInt(0); // port: ignored in a GETPORT call
+
+    XDR request2 = new XDR();
+    createPortmapXDRheader(request2, 3);
+    request2.writeInt(100005);
+    request2.writeInt(1);
+    request2.writeInt(6);
+    request2.writeInt(0);
+
+    testRequest(xdr_out, request2);
+  }
+  
+  static void testGetport() {
+    XDR xdr_out = new XDR();
+
+    createPortmapXDRheader(xdr_out, 3); // 3 = PMAPPROC_GETPORT
+
+    xdr_out.writeInt(100003); // program: nfs
+    xdr_out.writeInt(3); // version 3
+    xdr_out.writeInt(6); // protocol: 6 = TCP
+    xdr_out.writeInt(0); // port: ignored in a GETPORT call
+
+    XDR request2 = new XDR();
+
+    createPortmapXDRheader(request2, 3);
+    request2.writeInt(100003);
+    request2.writeInt(3);
+    request2.writeInt(6);
+    request2.writeInt(0);
+
+    testRequest(xdr_out, request2);
+  }
+  
+  static void testDump() {
+    XDR xdr_out = new XDR();
+    createPortmapXDRheader(xdr_out, 4); // 4 = PMAPPROC_DUMP
+    testRequest(xdr_out, xdr_out);
+  }
+}
\ No newline at end of file
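
A note on testRequest above: it sends the call but discards the UDP reply. A
sketch of decoding a successful GETPORT response (hypothetical, not part of
this commit; it assumes RpcReply.read consumes the entire reply header,
leaving the read cursor at the single result word):

    import java.util.Arrays;

    import org.apache.hadoop.oncrpc.RpcReply;
    import org.apache.hadoop.oncrpc.XDR;

    // Hypothetical helper: decode a PMAPPROC_GETPORT reply. The result word
    // is the registered port number; 0 means the program is not registered.
    static int decodeGetportReply(byte[] data, int length) {
      XDR reply = new XDR(Arrays.copyOf(data, length));
      RpcReply.read(reply); // xid, reply state, verifier, accept state
      return reply.readInt();
    }

It would be invoked as decodeGetportReply(receiveData,
receivePacket.getLength()) right after clientSocket.receive(receivePacket).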

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java?rev=1499029&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java Tue Jul  2 17:31:58 2013
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestDFSClientCache {
+  @Test
+  public void testLruTable() throws IOException {
+    DFSClientCache cache = new DFSClientCache(new Configuration(), 3);
+    DFSClient client = Mockito.mock(DFSClient.class);
+    cache.put("a", client);
+    assertTrue(cache.containsKey("a"));
+
+    cache.put("b", client);
+    cache.put("c", client);
+    cache.put("d", client);
+    assertEquals(3, cache.usedSize());
+    assertFalse(cache.containsKey("a"));
+
+    // Cache should have d,c,b in LRU order
+    assertTrue(cache.containsKey("b"));
+    // Do a lookup to make b the most recently used
+    assertNotNull(cache.get("b"));
+
+    cache.put("e", client);
+    assertEquals(3, cache.usedSize());
+    // c should be replaced with e, and cache has e,b,d
+    assertFalse(cache.containsKey("c"));
+    assertTrue(cache.containsKey("e"));
+    assertTrue(cache.containsKey("b"));
+    assertTrue(cache.containsKey("d"));
+  }
+}
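
The eviction behavior this test pins down (capacity 3, least-recently-used
entry evicted, get() refreshing recency) is the classic access-ordered
LinkedHashMap pattern. A minimal sketch of such a cache (illustrative only,
not the committed DFSClientCache source):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Access-ordered LRU map with a fixed capacity.
    public class LruMap<K, V> extends LinkedHashMap<K, V> {
      private final int capacity;

      public LruMap(int capacity) {
        super(16, 0.75f, true); // accessOrder = true: get() refreshes recency
        this.capacity = capacity;
      }

      @Override
      protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > capacity; // drop the least recently used entry
      }
    }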

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java?rev=1499029&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java Tue Jul  2 17:31:58 2013
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.junit.Test;
+
+public class TestOffsetRange {
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor1() throws IOException {
+    new OffsetRange(0, 0);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor2() throws IOException {
+    new OffsetRange(-1, 0);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor3() throws IOException {
+    new OffsetRange(-3, -1);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testConstructor4() throws IOException {
+    new OffsetRange(-3, 100);
+  }
+
+  @Test
+  public void testCompare() throws IOException {
+    OffsetRange r1 = new OffsetRange(0, 1);
+    OffsetRange r2 = new OffsetRange(1, 3);
+    OffsetRange r3 = new OffsetRange(1, 3);
+    OffsetRange r4 = new OffsetRange(3, 4);
+
+    assertEquals(0, r2.compareTo(r3));
+    // compareTo guarantees only the sign of the result, so assert on that
+    assertTrue(r2.compareTo(r1) > 0);
+    assertTrue(r2.compareTo(r4) < 0);
+  }
+}
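
The assertions above are consistent with ordering ranges by start offset,
breaking ties on the end offset. An illustrative compareTo of that shape (a
sketch only; the committed OffsetRange lives elsewhere in this changeset, and
the accessor names getMin()/getMax() are assumptions):

    // Order by start offset, then by end offset; equal ranges compare as 0.
    public int compareTo(OffsetRange other) {
      if (getMin() != other.getMin()) {
        return getMin() < other.getMin() ? -1 : 1;
      }
      if (getMax() != other.getMax()) {
        return getMax() < other.getMax() ? -1 : 1;
      }
      return 0;
    }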

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java?rev=1499029&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java Tue Jul  2 17:31:58 2013
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+/**
+ * Tests for {@link RpcProgramNfs3}
+ */
+public class TestRpcProgramNfs3 {
+  @Test(timeout=1000)
+  public void testIdempotent() {
+    int[][] procedures = {
+        { Nfs3Constant.NFSPROC3_NULL, 1 },
+        { Nfs3Constant.NFSPROC3_GETATTR, 1 },
+        { Nfs3Constant.NFSPROC3_SETATTR, 1 },
+        { Nfs3Constant.NFSPROC3_LOOKUP, 1 },
+        { Nfs3Constant.NFSPROC3_ACCESS, 1 },
+        { Nfs3Constant.NFSPROC3_READLINK, 1 },
+        { Nfs3Constant.NFSPROC3_READ, 1 },
+        { Nfs3Constant.NFSPROC3_WRITE, 1 },
+        { Nfs3Constant.NFSPROC3_CREATE, 0 },
+        { Nfs3Constant.NFSPROC3_MKDIR, 0 },
+        { Nfs3Constant.NFSPROC3_SYMLINK, 0 },
+        { Nfs3Constant.NFSPROC3_MKNOD, 0 },
+        { Nfs3Constant.NFSPROC3_REMOVE, 0 },
+        { Nfs3Constant.NFSPROC3_RMDIR, 0 },
+        { Nfs3Constant.NFSPROC3_RENAME, 0 },
+        { Nfs3Constant.NFSPROC3_LINK, 0 },
+        { Nfs3Constant.NFSPROC3_READDIR, 1 },
+        { Nfs3Constant.NFSPROC3_READDIRPLUS, 1 },
+        { Nfs3Constant.NFSPROC3_FSSTAT, 1 },
+        { Nfs3Constant.NFSPROC3_FSINFO, 1 },
+        { Nfs3Constant.NFSPROC3_PATHCONF, 1 },
+        { Nfs3Constant.NFSPROC3_COMMIT, 1 } };
+    for (int[] procedure : procedures) {
+      boolean idempotent = procedure[1] == 1;
+      int proc = procedure[0];
+      if (idempotent) {
+        Assert.assertTrue("Procedure " + proc + " should be idempotent",
+            RpcProgramNfs3.isIdempotent(proc));
+      } else {
+        Assert.assertFalse("Procedure " + proc + " should be non-idempotent",
+            RpcProgramNfs3.isIdempotent(proc));
+      }
+    }
+  }
+}

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1499029&r1=1499028&r2=1499029&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jul  2 17:31:58 2013
@@ -12,6 +12,8 @@ Trunk (Unreleased)
 
     HDFS-4659 Support setting execution bit for regular files (Brandon Li via sanjay)
 
+    HDFS-4762 Provide HDFS based NFSv3 and Mountd implementation (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.

Modified: hadoop/common/trunk/hadoop-hdfs-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/pom.xml?rev=1499029&r1=1499028&r2=1499029&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/pom.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/pom.xml Tue Jul  2 17:31:58 2013
@@ -34,6 +34,7 @@ http://maven.apache.org/xsd/maven-4.0.0.
     <module>hadoop-hdfs</module>
     <module>hadoop-hdfs-httpfs</module>
     <module>hadoop-hdfs/src/contrib/bkjournal</module>
+    <module>hadoop-hdfs-nfs</module>
   </modules>
 
   <build>