Posted to common-commits@hadoop.apache.org by wa...@apache.org on 2013/10/17 07:32:55 UTC

svn commit: r1532967 [5/7] - in /hadoop/common/branches/HDFS-4949/hadoop-common-project: hadoop-annotations/ hadoop-common/ hadoop-common/dev-support/ hadoop-common/src/main/bin/ hadoop-common/src/main/conf/ hadoop-common/src/main/docs/ hadoop-common/s...

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java Thu Oct 17 05:32:42 2013
@@ -17,29 +17,592 @@
  */
 package org.apache.hadoop.io;
 
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
 
-import junit.framework.TestCase;
+import static org.mockito.Mockito.*;
 
-public class TestMapFile extends TestCase {
+public class TestMapFile {
+  
+  private static final Path TEST_DIR = new Path(
+      System.getProperty("test.build.data", "/tmp"),
+      TestMapFile.class.getSimpleName());
+  
   private static Configuration conf = new Configuration();
 
+  @Before
+  public void setup() throws Exception {
+    LocalFileSystem fs = FileSystem.getLocal(conf);
+    if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
+      Assert.fail("Can't clean up test root dir");
+    }
+    fs.mkdirs(TEST_DIR);
+  }
+  
+  private static final Progressable defaultProgressable = new Progressable() {
+    @Override
+    public void progress() {
+    }
+  };
+
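+  // A stub codec whose methods all return null; it exists only to satisfy the
+  // codec parameter of the deprecated Writer constructors exercised below.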
+  private static final CompressionCodec defaultCodec = new CompressionCodec() {
+    @Override
+    public CompressionOutputStream createOutputStream(OutputStream out)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public CompressionOutputStream createOutputStream(OutputStream out,
+        Compressor compressor) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Class<? extends Compressor> getCompressorType() {
+      return null;
+    }
+
+    @Override
+    public Compressor createCompressor() {
+      return null;
+    }
+
+    @Override
+    public CompressionInputStream createInputStream(InputStream in)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public CompressionInputStream createInputStream(InputStream in,
+        Decompressor decompressor) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Class<? extends Decompressor> getDecompressorType() {
+      return null;
+    }
+
+    @Override
+    public Decompressor createDecompressor() {
+      return null;
+    }
+
+    @Override
+    public String getDefaultExtension() {
+      return null;
+    }
+  };
+
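+  // Helpers that create a writer/reader rooted under TEST_DIR; the small
+  // index interval (4) forces several index entries for only a few keys.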
+  private MapFile.Writer createWriter(String fileName,
+      Class<? extends WritableComparable<?>> keyClass,
+      Class<? extends Writable> valueClass) throws IOException {
+    Path dirName = new Path(TEST_DIR, fileName);
+    MapFile.Writer.setIndexInterval(conf, 4);
+    return new MapFile.Writer(conf, dirName, MapFile.Writer.keyClass(keyClass),
+        MapFile.Writer.valueClass(valueClass));
+  }
+
+  private MapFile.Reader createReader(String fileName,
+      Class<? extends WritableComparable<?>> keyClass) throws IOException {
+    Path dirName = new Path(TEST_DIR, fileName);
+    return new MapFile.Reader(dirName, conf,
+        MapFile.Reader.comparator(new WritableComparator(keyClass)));
+  }
+  
+  /**
+   * test {@code MapFile.Reader.getClosest()} method
+   */
+  @Test
+  public void testGetClosestOnCurrentApi() throws Exception {
+    final String TEST_PREFIX = "testGetClosestOnCurrentApi.mapfile";
+    MapFile.Writer writer = createWriter(TEST_PREFIX, Text.class, Text.class);
+    int FIRST_KEY = 1;
+    // Write keys: 1, 11, 21, ..., 91
+    for (int i = FIRST_KEY; i < 100; i += 10) {      
+      Text t = new Text(Integer.toString(i));
+      writer.append(t, t);
+    }
+    writer.close();
+
+    MapFile.Reader reader = createReader(TEST_PREFIX, Text.class);
+    Text key = new Text("55");
+    Text value = new Text();
+
+    // Test get closest with step forward
+    Text closest = (Text) reader.getClosest(key, value);
+    assertEquals(new Text("61"), closest);
+
+    // Test get closest with step back
+    closest = (Text) reader.getClosest(key, value, true);
+    assertEquals(new Text("51"), closest);
+
+    // Test get closest when we pass an explicit key
+    final Text explicitKey = new Text("21");
+    closest = (Text) reader.getClosest(explicitKey, value);
+    assertEquals(explicitKey, closest);
+
+    // Test what happens at boundaries. Assert if searching a key that is
+    // less than first key in the mapfile, that the first key is returned.
+    key = new Text("00");
+    closest = (Text) reader.getClosest(key, value);
+    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
+
+    // Assert that null is returned if key is > last entry in mapfile.
+    key = new Text("92");
+    closest = (Text) reader.getClosest(key, value);
+    assertNull("Not null key in testGetClosestWithNewCode", closest);
+
+    // If we were looking for the key before, we should get the last key
+    closest = (Text) reader.getClosest(key, value, true);
+    assertEquals(new Text("91"), closest);
+    reader.close();
+  }
+  
+  /**
+   * test {@code MapFile.Reader.midKey()} method
+   */
+  @Test
+  public void testMidKeyOnCurrentApi() throws Exception {
+    // Write a mapfile of simple data: keys are 0, 1, ..., 9
+    final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
+    MapFile.Writer writer = createWriter(TEST_PREFIX, IntWritable.class,
+        IntWritable.class);
+    int SIZE = 10;
+    for (int i = 0; i < SIZE; i++)
+      writer.append(new IntWritable(i), new IntWritable(i));
+    writer.close();
+
+    MapFile.Reader reader = createReader(TEST_PREFIX, IntWritable.class);
+    assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
+    reader.close();
+  }
+  
+  /**
+   * test {@code MapFile.rename()} method
+   */
+  @Test
+  public void testRename() {
+    final String NEW_FILE_NAME = "test-new.mapfile";
+    final String OLD_FILE_NAME = "test-old.mapfile";
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
+          IntWritable.class);
+      writer.close();
+      MapFile.rename(fs, new Path(TEST_DIR, OLD_FILE_NAME).toString(), 
+          new Path(TEST_DIR, NEW_FILE_NAME).toString());
+      MapFile.delete(fs, new Path(TEST_DIR, NEW_FILE_NAME).toString());
+    } catch (IOException ex) {
+      fail("testRename error " + ex);
+    }
+  }
+  
+  /**
+   * test {@code MapFile.rename()} method when the underlying
+   * {@code FileSystem.rename()} throws an {@code IOException}
+   */
+  @Test
+  public void testRenameWithException() {
+    final String ERROR_MESSAGE = "Can't rename file";
+    final String NEW_FILE_NAME = "test-new.mapfile";
+    final String OLD_FILE_NAME = "test-old.mapfile";
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      FileSystem spyFs = spy(fs);
+
+      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
+          IntWritable.class);
+      writer.close();
+
+      Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
+      Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
+      // Use doThrow().when() so the real rename is not invoked while
+      // stubbing the spy.
+      doThrow(new IOException(ERROR_MESSAGE)).when(spyFs).rename(oldDir,
+          newDir);
+
+      MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
+      fail("testRenameWithException no exception error !!!");
+    } catch (IOException ex) {
+      assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
+          ex.getMessage(), ERROR_MESSAGE);
+    }
+  }
+
+  /**
+   * test {@code MapFile.rename()} method when the underlying
+   * {@code FileSystem.rename()} returns false
+   */
+  @Test
+  public void testRenameWithFalse() {
+    final String ERROR_MESSAGE = "Could not rename";
+    final String NEW_FILE_NAME = "test-new.mapfile";
+    final String OLD_FILE_NAME = "test-old.mapfile";
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      FileSystem spyFs = spy(fs);
+
+      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
+          IntWritable.class);
+      writer.close();
+
+      Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
+      Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
+      doReturn(false).when(spyFs).rename(oldDir, newDir);
+
+      MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
+      fail("testRenameWithException no exception error !!!");
+    } catch (IOException ex) {
+      assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
+          .getMessage().startsWith(ERROR_MESSAGE));
+    }
+  }
+  
+  /**
+   * test throwing {@code IOException} in {@code MapFile.Writer} constructor    
+   */
+  @Test
+  public void testWriteWithFailDirCreation() {
+    String ERROR_MESSAGE = "Mkdirs failed to create directory";
+    Path dirName = new Path(TEST_DIR, "fail.mapfile");
+    MapFile.Writer writer = null;
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      FileSystem spyFs = spy(fs);
+      Path pathSpy = spy(dirName);
+      doReturn(spyFs).when(pathSpy).getFileSystem(conf);
+      doReturn(false).when(spyFs).mkdirs(dirName);
+
+      writer = new MapFile.Writer(conf, pathSpy,
+          MapFile.Writer.keyClass(IntWritable.class),
+          MapFile.Writer.valueClass(Text.class));
+      fail("testWriteWithFailDirCreation error !!!");
+    } catch (IOException ex) {
+      assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage()
+          .startsWith(ERROR_MESSAGE));
+    } finally {
+      if (writer != null)
+        try {
+          writer.close();
+        } catch (IOException e) {
+        }
+    }
+  }
+
+  /**
+   * test {@code MapFile.Reader.finalKey()} method
+   */
+  @Test
+  public void testOnFinalKey() {
+    final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
+    int SIZE = 10;
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          IntWritable.class);
+      for (int i = 0; i < SIZE; i++)
+        writer.append(new IntWritable(i), new IntWritable(i));
+      writer.close();
+
+      MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
+      IntWritable actualKey = new IntWritable(0);
+      reader.finalKey(actualKey);
+      assertEquals("testOnFinalKey not same !!!", new IntWritable(9),
+          actualKey);
+      reader.close();
+    } catch (IOException ex) {
+      fail("testOnFinalKey error !!!");
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Writer} creation with key and value classes, and
+   * validate the {@code keyClass()} and {@code valueClass()} option methods
+   */
+  @Test
+  public void testKeyValueClasses() {
+    Class<? extends WritableComparable<?>> keyClass = IntWritable.class;
+    Class<?> valueClass = Text.class;
+    try {
+      createWriter("testKeyValueClasses.mapfile", IntWritable.class, Text.class);
+      assertNotNull("writer key class null error !!!",
+          MapFile.Writer.keyClass(keyClass));
+      assertNotNull("writer value class null error !!!",
+          MapFile.Writer.valueClass(valueClass));
+    } catch (IOException ex) {
+      fail(ex.getMessage());
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Reader.getClosest()} with a wrong key class
+   */
+  @Test
+  public void testReaderGetClosest() throws Exception {
+    final String TEST_METHOD_KEY = "testReaderWithWrongKeyClass.mapfile";
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          Text.class);
+
+      for (int i = 0; i < 10; i++)
+        writer.append(new IntWritable(i), new Text("value" + i));
+      writer.close();
+
+      MapFile.Reader reader = createReader(TEST_METHOD_KEY, Text.class);
+      reader.getClosest(new Text("2"), new Text(""));
+      fail("no excepted exception in testReaderWithWrongKeyClass !!!");
+    } catch (IOException ex) {
+      /* Should be thrown to pass the test */
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Writer.append()} with a wrong value class
+   */
+  @Test
+  public void testReaderWithWrongValueClass() {
+    final String TEST_METHOD_KEY = "testReaderWithWrongValueClass.mapfile";
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          Text.class);
+      writer.append(new IntWritable(0), new IntWritable(0));
+      fail("no excepted exception in testReaderWithWrongKeyClass !!!");
+    } catch (IOException ex) {
+      /* Should be thrown to pass the test */
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Reader.next(key, value)} for iteration.
+   */
+  @Test
+  public void testReaderKeyIteration() {
+    final String TEST_METHOD_KEY = "testReaderKeyIteration.mapfile";
+    int SIZE = 10;
+    int ITERATIONS = 5;
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          Text.class);
+      int start = 0;
+      for (int i = 0; i < SIZE; i++)
+        writer.append(new IntWritable(i), new Text("Value:" + i));
+      writer.close();
+
+      MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
+      // test iteration
+      Writable startValue = new Text("Value:" + start);
+      int i = 0;
+      while (i++ < ITERATIONS) {
+        IntWritable key = new IntWritable(start);
+        Writable value = startValue;
+        while (reader.next(key, value)) {
+          assertNotNull(key);
+          assertNotNull(value);
+        }
+        reader.reset();
+      }
+      assertTrue("reader seek error !!!",
+          reader.seek(new IntWritable(SIZE / 2)));
+      assertFalse("reader seek error !!!",
+          reader.seek(new IntWritable(SIZE * 2)));
+    } catch (IOException ex) {
+      fail("reader seek error !!!");
+    }
+  }
+
+  /**
+   * test {@code MapFile.fix()} method
+   */
+  @Test
+  public void testFix() {
+    final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
+    int PAIR_SIZE = 20;
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
+      MapFile.Writer writer = createWriter(INDEX_LESS_MAP_FILE,
+          IntWritable.class, Text.class);
+      for (int i = 0; i < PAIR_SIZE; i++)
+        writer.append(new IntWritable(0), new Text("value"));
+      writer.close();
+
+      File indexFile = new File(TEST_DIR.toString(), INDEX_LESS_MAP_FILE
+          + "/index");
+      boolean isDeleted = false;
+      if (indexFile.exists())
+        isDeleted = indexFile.delete();
+
+      if (isDeleted)
+        assertTrue("testFix error !!!",
+            MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
+    } catch (Exception ex) {
+      fail("testFix error !!!");
+    }
+  }
+
+  /**
+   * test all available constructors for {@code MapFile.Writer}
+   */
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testDeprecatedConstructors() {
+    String path = new Path(TEST_DIR, "writes.mapfile").toString();
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      MapFile.Writer writer = new MapFile.Writer(conf, fs, path,
+          IntWritable.class, Text.class, CompressionType.RECORD);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
+          Text.class, CompressionType.RECORD, defaultProgressable);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
+          Text.class, CompressionType.RECORD, defaultCodec, defaultProgressable);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path,
+          WritableComparator.get(Text.class), Text.class);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path,
+          WritableComparator.get(Text.class), Text.class,
+          SequenceFile.CompressionType.RECORD);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path,
+          WritableComparator.get(Text.class), Text.class,
+          CompressionType.RECORD, defaultProgressable);
+      assertNotNull(writer);
+      writer.close();
+
+      MapFile.Reader reader = new MapFile.Reader(fs, path,
+          WritableComparator.get(IntWritable.class), conf);
+      assertNotNull(reader);
+      assertNotNull("reader key is null !!!", reader.getKeyClass());
+      assertNotNull("reader value in null", reader.getValueClass());
+
+    } catch (IOException e) {
+      fail(e.getMessage());
+    }
+  }
+  
+  /**
+   * test that the {@code MapFile.Writer} constructor fails with
+   * {@code IllegalArgumentException} when no key/value classes are given
+   */
+  @Test
+  public void testKeyLessWriterCreation() {
+    MapFile.Writer writer = null;
+    try {
+      writer = new MapFile.Writer(conf, TEST_DIR);
+      fail("fail in testKeyLessWriterCreation !!!");
+    } catch (IllegalArgumentException ex) {
+      // expected
+    } catch (Exception e) {
+      fail("fail in testKeyLessWriterCreation. Other ex !!!");
+    } finally {
+      if (writer != null)
+        try {
+          writer.close();
+        } catch (IOException e) {
+        }
+    }
+  }
+
+  /**
+   * test {@code MapFile.Writer} constructor with IOException
+   */
+  @Test
+  public void testPathExplosionWriterCreation() {
+    Path path = new Path(TEST_DIR, "testPathExplosionWriterCreation.mapfile");
+    String TEST_ERROR_MESSAGE = "Mkdirs failed to create directory "
+        + path.getName();
+    MapFile.Writer writer = null;
+    try {
+      FileSystem fsSpy = spy(FileSystem.get(conf));
+      Path pathSpy = spy(path);
+      doThrow(new IOException(TEST_ERROR_MESSAGE)).when(fsSpy).mkdirs(path);
+      doReturn(fsSpy).when(pathSpy).getFileSystem(conf);
+
+      writer = new MapFile.Writer(conf, pathSpy,
+          MapFile.Writer.keyClass(IntWritable.class),
+          MapFile.Writer.valueClass(IntWritable.class));
+      fail("fail in testPathExplosionWriterCreation !!!");
+    } catch (IOException ex) {
+      assertEquals("testPathExplosionWriterCreation ex message error !!!",
+          ex.getMessage(), TEST_ERROR_MESSAGE);
+    } catch (Exception e) {
+      fail("fail in testPathExplosionWriterCreation. Other ex !!!");
+    } finally {
+      if (writer != null)
+        try {
+          writer.close();
+        } catch (IOException e) {
+        }
+    }
+  }
+
+  /**
+   * test {@code MapFile.Writer.append()} method with keys in descending order
+   */
+  @Test
+  public void testDescOrderWithThrowExceptionWriterAppend() {
+    try {
+      MapFile.Writer writer = createWriter(".mapfile", IntWritable.class,
+          Text.class);
+      writer.append(new IntWritable(2), new Text("value: " + 1));
+      writer.append(new IntWritable(2), new Text("value: " + 2));
+      writer.append(new IntWritable(2), new Text("value: " + 4));
+      writer.append(new IntWritable(1), new Text("value: " + 3));
+      fail("testDescOrderWithThrowExceptionWriterAppend not expected exception error !!!");
+    } catch (IOException ex) {
+      // expected: keys must not decrease
+    } catch (Exception e) {
+      fail("testDescOrderWithThrowExceptionWriterAppend other ex throw !!!");
+    }
+  }
+
+  @Test
+  public void testMainMethodMapFile() {
+    String inFile = "mainMethodMapFile.mapfile";
+    String path = new Path(TEST_DIR, inFile).toString();
+    String outFile = new Path(TEST_DIR, "mainMethodMapFile-out.mapfile")
+        .toString();
+    String[] args = { path, outFile };
+    try {
+      MapFile.Writer writer = createWriter(inFile, IntWritable.class,
+          Text.class);
+      writer.append(new IntWritable(1), new Text("test_text1"));
+      writer.append(new IntWritable(2), new Text("test_text2"));
+      writer.close();
+      MapFile.main(args);
+    } catch (Exception ex) {
+      fail("testMainMethodMapFile error !!!");
+    }
+  }
+
   /**
    * Test getClosest feature.
+   * 
    * @throws Exception
    */
+  @Test
+  @SuppressWarnings("deprecation")
   public void testGetClosest() throws Exception {
-    // Write a mapfile of simple data: keys are 
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-      getName() + ".mapfile"); 
+    // Write a mapfile of simple data; keys are added in intervals of ten.
+    Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(dirName);
     // Make an index entry for every third insertion.
     MapFile.Writer.setIndexInterval(conf, 3);
     MapFile.Writer writer = new MapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), Text.class, Text.class);
+        qualifiedDirName.toString(), Text.class, Text.class);
     // Assert that the index interval is 1
     assertEquals(3, writer.getIndexInterval());
     // Add entries up to 100 in intervals of ten.
@@ -51,74 +614,84 @@ public class TestMapFile extends TestCas
     }
     writer.close();
     // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
-      conf);
+    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    try {
     Text key = new Text("55");
     Text value = new Text();
-    Text closest = (Text)reader.getClosest(key, value);
+    Text closest = (Text) reader.getClosest(key, value);
     // Assert that closest after 55 is 60
     assertEquals(new Text("60"), closest);
     // Get closest that falls before the passed key: 50
-    closest = (Text)reader.getClosest(key, value, true);
+    closest = (Text) reader.getClosest(key, value, true);
     assertEquals(new Text("50"), closest);
     // Test get closest when we pass explicit key
     final Text TWENTY = new Text("20");
-    closest = (Text)reader.getClosest(TWENTY, value);
+    closest = (Text) reader.getClosest(TWENTY, value);
     assertEquals(TWENTY, closest);
-    closest = (Text)reader.getClosest(TWENTY, value, true);
+    closest = (Text) reader.getClosest(TWENTY, value, true);
     assertEquals(TWENTY, closest);
-    // Test what happens at boundaries.  Assert if searching a key that is
+    // Test what happens at boundaries. Assert if searching a key that is
     // less than first key in the mapfile, that the first key is returned.
     key = new Text("00");
-    closest = (Text)reader.getClosest(key, value);
+    closest = (Text) reader.getClosest(key, value);
     assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
-    
-    // If we're looking for the first key before, and we pass in a key before 
+
+    // If we're looking for the first key before, and we pass in a key before
     // the first key in the file, we should get null
-    closest = (Text)reader.getClosest(key, value, true);
+    closest = (Text) reader.getClosest(key, value, true);
     assertNull(closest);
-    
+
     // Assert that null is returned if key is > last entry in mapfile.
     key = new Text("99");
-    closest = (Text)reader.getClosest(key, value);
+    closest = (Text) reader.getClosest(key, value);
     assertNull(closest);
 
     // If we were looking for the key before, we should get the last key
-    closest = (Text)reader.getClosest(key, value, true);
+    closest = (Text) reader.getClosest(key, value, true);
     assertEquals(new Text("90"), closest);
+    } finally {
+      reader.close();
+    }
   }
 
+  @Test
+  @SuppressWarnings("deprecation")
   public void testMidKey() throws Exception {
-    // Write a mapfile of simple data: keys are 
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-      getName() + ".mapfile"); 
+    // Write a mapfile with a single key/value pair.
+    Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(dirName);
- 
+
     MapFile.Writer writer = new MapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
+        qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
     writer.append(new IntWritable(1), new IntWritable(1));
     writer.close();
     // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
-      conf);
-    assertEquals(new IntWritable(1), reader.midKey());
+    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    try {
+      assertEquals(new IntWritable(1), reader.midKey());
+    } finally {
+      reader.close();
+    }
   }
 
-
+  @Test
+  @SuppressWarnings("deprecation")
   public void testMidKeyEmpty() throws Exception {
-    // Write a mapfile of simple data: keys are 
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-      getName() + ".mapfile"); 
+    // Write an empty mapfile.
+    Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(dirName);
- 
+
     MapFile.Writer writer = new MapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
+        qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
     writer.close();
     // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
-      conf);
-    assertEquals(null, reader.midKey());
+    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    try {
+      assertEquals(null, reader.midKey()); 
+    } finally {
+      reader.close();
+    }
   }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java Thu Oct 17 05:32:42 2013
@@ -20,6 +20,8 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicReference;
+
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.*;
@@ -51,6 +53,39 @@ public class TestSetFile extends TestCas
       fs.close();
     }
   }
+  
+  /**
+   * test {@code SetFile.Reader} methods
+   * next() and get() in combination
+   */
+  public void testSetFileAccessMethods() {
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      int size = 10;
+      writeData(fs, size);
+      SetFile.Reader reader = createReader(fs);
+      assertTrue("testSetFileAccessMethods1 error !!!",
+          reader.next(new IntWritable(0)));
+      // get(i) returns i+1, apparently because seek() leaves the matched
+      // entry as the current record and next() then reads the one after it
+      assertEquals("testSetFileAccessMethods2 error !!!",
+          new IntWritable(size / 2 + 1), reader.get(new IntWritable(size / 2)));
+      assertNull("testSetFileAccessMethods3 error !!!",
+          reader.get(new IntWritable(size * 2)));
+    } catch (Exception ex) {
+      fail("testSetFileAccessMethods error !!!");
+    }
+  }
+
+  private SetFile.Reader createReader(FileSystem fs) throws IOException  {
+    return new SetFile.Reader(fs, FILE, 
+        WritableComparator.get(IntWritable.class), conf);    
+  }
+  
+  @SuppressWarnings("deprecation")
+  private void writeData(FileSystem fs, int elementSize) throws IOException {
+    MapFile.delete(fs, FILE);    
+    SetFile.Writer writer = new SetFile.Writer(fs, FILE, IntWritable.class);
+    for (int i = 0; i < elementSize; i++)
+      writer.append(new IntWritable(i));
+    writer.close();    
+  }
 
   private static RandomDatum[] generate(int count) {
     LOG.info("generating " + count + " records in memory");

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java Thu Oct 17 05:32:42 2013
@@ -19,11 +19,12 @@
 package org.apache.hadoop.io;
 
 import junit.framework.TestCase;
-
 import java.io.IOException;
+import java.nio.BufferUnderflowException;
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.util.Random;
+import com.google.common.primitives.Bytes;
 
 /** Unit tests for LargeUTF8. */
 public class TestText extends TestCase {
@@ -321,7 +322,81 @@ public class TestText extends TestCase {
       (new Text("foo"),
        "{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.io.Text\"}");
   }
-
+  
+  /**
+   * test {@code Text.charAt()} for in-range and out-of-range indices
+   */
+  public void testCharAt() {
+    String line = "adsawseeeeegqewgasddga";
+    Text text = new Text(line);
+    for (int i = 0; i < line.length(); i++) {
+      assertTrue("testCharAt error1 !!!", text.charAt(i) == line.charAt(i));
+    }    
+    assertEquals("testCharAt error2 !!!", -1, text.charAt(-1));    
+    assertEquals("testCharAt error3 !!!", -1, text.charAt(100));
+  }    
+  
+  /**
+   * test {@code Text} readFields/write operations
+   */
+  public void testReadWriteOperations() {
+    String line = "adsawseeeeegqewgasddga";
+    byte[] inputBytes = line.getBytes();       
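+    // Text.readFields expects a vint length followed by the raw bytes;
+    // 22 is the vint encoding of line.length(), prepended here by hand.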
+    inputBytes = Bytes.concat(new byte[] {(byte)22}, inputBytes);        
+    
+    DataInputBuffer in = new DataInputBuffer();
+    DataOutputBuffer out = new DataOutputBuffer();
+    Text text = new Text(line);
+    try {      
+      in.reset(inputBytes, inputBytes.length);
+      text.readFields(in);      
+    } catch(Exception ex) {
+      fail("testReadFields error !!!");
+    }    
+    try {
+      text.write(out);
+    } catch(IOException ex) {      
+    } catch(Exception ex) {
+      fail("testReadWriteOperations error !!!");
+    }        
+  }
+  
+  /**
+   * test {@code Text.bytesToCodePoint(bytes) } 
+   * with {@code BufferUnderflowException}
+   * 
+   */
+  public void testBytesToCodePoint() {
+    try {
+      ByteBuffer bytes = ByteBuffer.wrap(new byte[] {-2, 45, 23, 12, 76, 89});                                      
+      Text.bytesToCodePoint(bytes);      
+      assertTrue("testBytesToCodePoint error !!!", bytes.position() == 6 );                      
+    } catch (BufferUnderflowException ex) {
+      fail("testBytesToCodePoint unexpected BufferUnderflowException");
+    } catch (Exception e) {
+      fail("testBytesToCodePoint unexpected exception");
+    }
+  }
+  
+  public void testbytesToCodePointWithInvalidUTF() {
+    try {                 
+      Text.bytesToCodePoint(ByteBuffer.wrap(new byte[] {-2}));
+      fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
+    } catch (BufferUnderflowException ex) {      
+    } catch(Exception e) {
+      fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
+    }
+  }
+  
+  public void testUtf8Length() {         
+    assertEquals("testUtf8Length1 error   !!!", 1, Text.utf8Length(new String(new char[]{(char)1})));
+    assertEquals("testUtf8Length127 error !!!", 1, Text.utf8Length(new String(new char[]{(char)127})));
+    assertEquals("testUtf8Length128 error !!!", 2, Text.utf8Length(new String(new char[]{(char)128})));
+    assertEquals("testUtf8Length193 error !!!", 2, Text.utf8Length(new String(new char[]{(char)193})));    
+    assertEquals("testUtf8Length225 error !!!", 2, Text.utf8Length(new String(new char[]{(char)225})));
+    assertEquals("testUtf8Length254 error !!!", 2, Text.utf8Length(new String(new char[]{(char)254})));                 
+  }
+  
   public static void main(String[] args)  throws Exception
   {
     TestText test = new TestText("main");

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java Thu Oct 17 05:32:42 2013
@@ -285,6 +285,45 @@ public class TestNativeIO {
     FileUtil.setExecutable(testFile, true);
     assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
         NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
+
+    // Validate that access checks work as expected for long paths
+
+    // Assemble a path longer than 260 chars (MAX_PATH)
+    String testFileRelativePath = "";
+    for (int i = 0; i < 15; ++i) {
+      testFileRelativePath += "testfileaccessfolder\\";
+    }
+    testFileRelativePath += "testfileaccess";
+    testFile = new File(TEST_DIR, testFileRelativePath);
+    assertTrue(testFile.getParentFile().mkdirs());
+    assertTrue(testFile.createNewFile());
+
+    // Validate ACCESS_READ
+    FileUtil.setReadable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_READ));
+
+    FileUtil.setReadable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_READ));
+
+    // Validate ACCESS_WRITE
+    FileUtil.setWritable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_WRITE));
+
+    FileUtil.setWritable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_WRITE));
+
+    // Validate ACCESS_EXECUTE
+    FileUtil.setExecutable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
+
+    FileUtil.setExecutable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
   }
 
   @Test (timeout = 30000)

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java Thu Oct 17 05:32:42 2013
@@ -152,4 +152,10 @@ public class StaticMapping extends Abstr
     // reloadCachedMappings does nothing for StaticMapping; there is
     // nowhere to reload from since all data is in memory.
   }
+
+  @Override
+  public void reloadCachedMappings(List<String> names) {
+    // reloadCachedMappings does nothing for StaticMapping; there is
+    // nowhere to reload from since all data is in memory.
+  }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java Thu Oct 17 05:32:42 2013
@@ -120,5 +120,9 @@ public class TestSwitchMapping extends A
     @Override
     public void reloadCachedMappings() {
     }
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+    }
   }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java Thu Oct 17 05:32:42 2013
@@ -16,11 +16,21 @@
  */
 package org.apache.hadoop.security;
 
-import static org.junit.Assert.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.TestSaslRPC;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Shell;
 import org.junit.*;
 
-import static org.mockito.Mockito.*;
-
+import javax.security.auth.Subject;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.LoginContext;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -30,21 +40,13 @@ import java.util.Collection;
 import java.util.LinkedHashSet;
 import java.util.Set;
 
-import javax.security.auth.Subject;
-import javax.security.auth.login.AppConfigurationEntry;
-import javax.security.auth.login.LoginContext;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import static org.apache.hadoop.test.MetricsAsserts.*;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
-import org.apache.hadoop.util.Shell;
+import static org.apache.hadoop.ipc.TestSaslRPC.*;
+import static org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class TestUserGroupInformation {
   final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
@@ -786,4 +788,29 @@ public class TestUserGroupInformation {
     UserGroupInformation.setLoginUser(ugi);
     assertEquals(ugi, UserGroupInformation.getLoginUser());
   }
+
+  /**
+   * In some scenarios, such as HA, delegation tokens are associated with a
+   * logical name. The tokens are cloned and associated with the physical
+   * address of the server where the service is provided. This test ensures
+   * that cloned delegation tokens are used locally and are not returned by
+   * {@link UserGroupInformation#getCredentials()}.
+   */
+  @Test
+  public void testPrivateTokenExclusion() throws Exception  {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    TestTokenIdentifier tokenId = new TestTokenIdentifier();
+    Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(
+            tokenId.getBytes(), "password".getBytes(),
+            tokenId.getKind(), null);
+    ugi.addToken(new Text("regular-token"), token);
+
+    // Now add cloned private token
+    ugi.addToken(new Text("private-token"), new Token.PrivateToken<TestTokenIdentifier>(token));
+    ugi.addToken(new Text("private-token1"), new Token.PrivateToken<TestTokenIdentifier>(token));
+
+    // Ensure only non-private tokens are returned
+    Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
+    assertEquals(1, tokens.size());
+  }
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java Thu Oct 17 05:32:42 2013
@@ -145,6 +145,28 @@ public class KeyStoreTestUtil {
     saveKeyStore(ks, filename, password);
   }
 
+  /**
+   * Creates a keystore with a single key and saves it to a file.
+   * 
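+   * <p>A usage sketch (key/cert helpers as used elsewhere in this class;
+   * file name, passwords, and alias are illustrative):
+   * <pre>
+   *   KeyPair keyPair = generateKeyPair("RSA");
+   *   X509Certificate cert = generateCertificate("CN=Test", keyPair, 30,
+   *       "SHA1withRSA");
+   *   createKeyStore("keystore.jks", "storeP", "keyP", "test",
+   *       keyPair.getPrivate(), cert);
+   * </pre>
+   *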
+   * @param filename String file to save
+   * @param password String store password to set on keystore
+   * @param keyPassword String key password to set on key
+   * @param alias String alias to use for the key
+   * @param privateKey Key to save in keystore
+   * @param cert Certificate to use as certificate chain associated to key
+   * @throws GeneralSecurityException for any error with the security APIs
+   * @throws IOException if there is an I/O error saving the file
+   */
+  public static void createKeyStore(String filename,
+                                    String password, String keyPassword, String alias,
+                                    Key privateKey, Certificate cert)
+    throws GeneralSecurityException, IOException {
+    KeyStore ks = createEmptyKeyStore();
+    ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(),
+                   new Certificate[]{cert});
+    saveKeyStore(ks, filename, password);
+  }
+
   public static void createTrustStore(String filename,
                                       String password, String alias,
                                       Certificate cert)
@@ -178,6 +200,19 @@ public class KeyStoreTestUtil {
     f.delete();
   }
 
+  /**
+   * Performs complete setup of SSL configuration in preparation for testing an
+   * SSLFactory.  This includes keys, certs, keystores, truststores, the server
+   * SSL configuration file, the client SSL configuration file, and the master
+   * configuration file read by the SSLFactory.
+   * 
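+   * <p>A typical call, with directory names chosen by the caller:
+   * <pre>
+   *   Configuration conf = new Configuration();
+   *   KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+   *   SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+   *   sslFactory.init();
+   * </pre>
+   *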
+   * @param keystoresDir String directory to save keystores
+   * @param sslConfDir String directory to save SSL configuration files
+   * @param conf Configuration master configuration to be used by an SSLFactory,
+   *   which will be mutated by this method
+   * @param useClientCert boolean true to make the client present a cert in the
+   *   SSL handshake
+   */
   public static void setupSSLConfig(String keystoresDir, String sslConfDir,
                                     Configuration conf, boolean useClientCert)
     throws Exception {
@@ -213,58 +248,115 @@ public class KeyStoreTestUtil {
 
     KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
 
-    Configuration clientSSLConf = new Configuration(false);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), clientKS);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), clientPassword);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
+    Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword,
+      clientPassword, trustKS);
+    Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword,
+      serverPassword, trustKS);
 
-    Configuration serverSSLConf = new Configuration(false);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), serverKS);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), serverPassword);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
+    saveConfig(sslClientConfFile, clientSSLConf);
+    saveConfig(sslServerConfFile, serverSSLConf);
 
-    Writer writer = new FileWriter(sslClientConfFile);
-    try {
-      clientSSLConf.writeXml(writer);
-    } finally {
-      writer.close();
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
+    conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName());
+    conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName());
+    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert);
+  }
+
+  /**
+   * Creates SSL configuration for a client.
+   * 
+   * @param clientKS String client keystore file
+   * @param password String store password, or null to avoid setting store
+   *   password
+   * @param keyPassword String key password, or null to avoid setting key
+   *   password
+   * @param trustKS String truststore file
+   * @return Configuration for client SSL
+   */
+  public static Configuration createClientSSLConfig(String clientKS,
+      String password, String keyPassword, String trustKS) {
+    Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT,
+      clientKS, password, keyPassword, trustKS);
+    return clientSSLConf;
+  }
+
+  /**
+   * Creates SSL configuration for a server.
+   * 
+   * @param serverKS String server keystore file
+   * @param password String store password, or null to avoid setting store
+   *   password
+   * @param keyPassword String key password, or null to avoid setting key
+   *   password
+   * @param trustKS String truststore file
+   * @return Configuration for server SSL
+   */
+  public static Configuration createServerSSLConfig(String serverKS,
+      String password, String keyPassword, String trustKS) throws IOException {
+    Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER,
+      serverKS, password, keyPassword, trustKS);
+    return serverSSLConf;
+  }
+
+  /**
+   * Creates SSL configuration.
+   * 
+   * @param mode SSLFactory.Mode mode to configure
+   * @param keystore String keystore file
+   * @param password String store password, or null to avoid setting store
+   *   password
+   * @param keyPassword String key password, or null to avoid setting key
+   *   password
+   * @param trustKS String truststore file
+   * @return Configuration for SSL
+   */
+  private static Configuration createSSLConfig(SSLFactory.Mode mode,
+      String keystore, String password, String keyPassword, String trustKS) {
+    String trustPassword = "trustP";
+
+    Configuration sslConf = new Configuration(false);
+    if (keystore != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), keystore);
+    }
+    if (password != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), password);
     }
+    if (keyPassword != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY),
+        keyPassword);
+    }
+    if (trustKS != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
+    }
+    if (trustPassword != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY),
+        trustPassword);
+    }
+    sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
 
-    writer = new FileWriter(sslServerConfFile);
+    return sslConf;
+  }
+
+  /**
+   * Saves configuration to a file.
+   * 
+   * @param file File to save
+   * @param conf Configuration contents to write to file
+   * @throws IOException if there is an I/O error saving the file
+   */
+  public static void saveConfig(File file, Configuration conf)
+      throws IOException {
+    Writer writer = new FileWriter(file);
     try {
-      serverSSLConf.writeXml(writer);
+      conf.writeXml(writer);
     } finally {
       writer.close();
     }
-
-    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
-    conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName());
-    conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName());
-    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert);
   }
-
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java Thu Oct 17 05:32:42 2013
@@ -29,12 +29,19 @@ import javax.net.ssl.HttpsURLConnection;
 import java.io.File;
 import java.net.URL;
 import java.security.GeneralSecurityException;
+import java.security.KeyPair;
+import java.security.cert.X509Certificate;
+import java.util.Collections;
+import java.util.Map;
 
 public class TestSSLFactory {
 
   private static final String BASEDIR =
     System.getProperty("test.build.dir", "target/test-dir") + "/" +
     TestSSLFactory.class.getSimpleName();
+  private static final String KEYSTORES_DIR =
+    new File(BASEDIR).getAbsolutePath();
+  private String sslConfsDir;
 
   @BeforeClass
   public static void setUp() throws Exception {
@@ -46,18 +53,16 @@ public class TestSSLFactory {
   private Configuration createConfiguration(boolean clientCert)
     throws Exception {
     Configuration conf = new Configuration();
-    String keystoresDir = new File(BASEDIR).getAbsolutePath();
-    String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, clientCert);
+    KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf,
+      clientCert);
     return conf;
   }
 
   @After
   @Before
   public void cleanUp() throws Exception {
-    String keystoresDir = new File(BASEDIR).getAbsolutePath();
-    String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfsDir);
+    sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
+    KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir);
   }
 
   @Test(expected = IllegalStateException.class)
@@ -181,4 +186,90 @@ public class TestSSLFactory {
     }
   }
 
+  @Test
+  public void testServerDifferentPasswordAndKeyPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
+      "keyPassword", "password", "keyPassword");
+  }
+
+  @Test
+  public void testServerKeyPasswordDefaultsToPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
+      "password", "password", null);
+  }
+
+  @Test
+  public void testClientDifferentPasswordAndKeyPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
+      "keyPassword", "password", "keyPassword");
+  }
+
+  @Test
+  public void testClientKeyPasswordDefaultsToPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
+      "password", "password", null);
+  }
+
+  /**
+   * Checks that SSLFactory initialization is successful with the given
+   * arguments.  This is a helper method for writing test cases that cover
+   * different combinations of settings for the store password and key password.
+   * It takes care of bootstrapping a keystore, a truststore, and SSL client or
+   * server configuration.  Then, it initializes an SSLFactory.  If no exception
+   * is thrown, then initialization was successful.
+   * 
+   * @param mode SSLFactory.Mode mode to test
+   * @param password String store password to set on keystore
+   * @param keyPassword String key password to set on keystore
+   * @param confPassword String store password to set in SSL config file, or null
+   *   to avoid setting in SSL config file
+   * @param confKeyPassword String key password to set in SSL config file, or
+   *   null to avoid setting in SSL config file
+   * @throws Exception for any error
+   */
+  private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
+      String password, String keyPassword, String confPassword,
+      String confKeyPassword) throws Exception {
+    String keystore = new File(KEYSTORES_DIR, "keystore.jks").getAbsolutePath();
+    String truststore = new File(KEYSTORES_DIR, "truststore.jks")
+      .getAbsolutePath();
+    String trustPassword = "trustP";
+
+    // Create keys, certs, keystore, and truststore.
+    KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
+    X509Certificate cert = KeyStoreTestUtil.generateCertificate("CN=Test",
+      keyPair, 30, "SHA1withRSA");
+    KeyStoreTestUtil.createKeyStore(keystore, password, keyPassword, "Test",
+      keyPair.getPrivate(), cert);
+    Map<String, X509Certificate> certs = Collections.singletonMap("server",
+      cert);
+    KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
+
+    // Create SSL configuration file, for either server or client.
+    final String sslConfFileName;
+    final Configuration sslConf;
+    if (mode == SSLFactory.Mode.SERVER) {
+      sslConfFileName = "ssl-server.xml";
+      sslConf = KeyStoreTestUtil.createServerSSLConfig(keystore, confPassword,
+        confKeyPassword, truststore);
+    } else {
+      sslConfFileName = "ssl-client.xml";
+      sslConf = KeyStoreTestUtil.createClientSSLConfig(keystore, confPassword,
+        confKeyPassword, truststore);
+    }
+    KeyStoreTestUtil.saveConfig(new File(sslConfsDir, sslConfFileName), sslConf);
+
+    // Create the master configuration for use by the SSLFactory, which by
+    // default refers to the ssl-server.xml or ssl-client.xml created above.
+    Configuration conf = new Configuration();
+    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, true);
+
+    // Try initializing an SSLFactory.
+    SSLFactory sslFactory = new SSLFactory(mode, conf);
+    try {
+      sslFactory.init();
+    } finally {
+      sslFactory.destroy();
+    }
+  }
 }
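
For context, the lifecycle that checkSSLFactoryInitWithPasswords drives is the
standard SSLFactory init/destroy pair. A minimal sketch, assuming a valid
ssl-client.xml (keystore and truststore locations plus passwords) is available
on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.ssl.SSLFactory;

    // Minimal SSLFactory lifecycle, as exercised by the helper above.
    Configuration conf = new Configuration();
    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    try {
      sslFactory.init();     // fails here if a keystore password is wrong
    } finally {
      sslFactory.destroy();  // always release keystore resources
    }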

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml Thu Oct 17 05:32:42 2013
@@ -69,4 +69,13 @@
   <value>simple</value>
 </property>
 
+<property>
+  <name>nfs3.server.port</name>
+  <value>2079</value>
+</property>
+
+<property>
+  <name>nfs3.mountd.port</name>
+  <value>4272</value>
+</property>
 </configuration>
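
These two test-only properties feed the configurable NFS and mountd ports
introduced in Nfs3Base later in this commit. They are read with the usual
Configuration accessor; a sketch mirroring the new Nfs3Base constructor:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

    // core-site.xml is on the default resource list, so the test port is
    // picked up automatically; Nfs3Constant.PORT remains the fallback.
    Configuration conf = new Configuration();
    int nfsPort = conf.getInt("nfs3.server.port", Nfs3Constant.PORT);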

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml Thu Oct 17 05:32:42 2013
@@ -601,16 +601,28 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path/file&gt; \.\.\.:( |\t)*Set the replication level of a file.( )*</expected-output>
+          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path&gt; \.\.\.:( |\t)*Set the replication level of a file. If &lt;path&gt; is a directory( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*The -R flag requests a recursive change of replication level( )*</expected-output>
+          <expected-output>^( |\t)*then the command recursively changes the replication factor of( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*for an entire tree.( )*</expected-output>
+          <expected-output>^( |\t)*all files under the directory tree rooted at &lt;path&gt;\.( )*</expected-output>
         </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The -w flag requests that the command wait for the replication( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*to complete. This can potentially take a very long time\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The -R flag is accepted for backwards compatibility\. It has no effect\.( )*</expected-output>
+        </comparator>
       </comparators>
     </test>
 

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/pom.xml?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/pom.xml Thu Oct 17 05:32:42 2013
@@ -86,13 +86,11 @@
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty</artifactId>
-      <version>3.6.2.Final</version>
       <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
-      <version>11.0.2</version>
     </dependency>
   </dependencies>
 

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java Thu Oct 17 05:32:42 2013
@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
 
 /**
@@ -37,11 +38,10 @@ public class MountResponse {
   /** Response for RPC call {@link MountInterface.MNTPROC#MNT} */
   public static XDR writeMNTResponse(int status, XDR xdr, int xid,
       byte[] handle) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     xdr.writeInt(status);
     if (status == MNT_OK) {
-      xdr.writeInt(handle.length);
-      xdr.writeFixedOpaque(handle);
+      xdr.writeVariableOpaque(handle);
       // Only MountV3 returns a list of supported authFlavors
       xdr.writeInt(1);
       xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
@@ -51,7 +51,7 @@ public class MountResponse {
 
   /** Response for RPC call {@link MountInterface.MNTPROC#DUMP} */
   public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (MountEntry mountEntry : mounts) {
       xdr.writeBoolean(true); // Value follows yes
       xdr.writeString(mountEntry.host());
@@ -66,7 +66,7 @@ public class MountResponse {
       List<NfsExports> hostMatcher) {
     assert (exports.size() == hostMatcher.size());
 
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (int i = 0; i < exports.size(); i++) {
       xdr.writeBoolean(true); // Value follows - yes
       xdr.writeString(exports.get(i));
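
The pattern replacing voidReply in all three methods above is the same: build
an accepted reply carrying an explicit verifier, write it, then append the
procedure-specific body. In isolation (a sketch; the xid and status values
are placeholders):

    import org.apache.hadoop.oncrpc.RpcAcceptedReply;
    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.VerifierNone;

    XDR xdr = new XDR();
    int xid = 42;  // transaction id taken from the matching request
    // Header first: an accepted reply with an empty AUTH_NONE verifier.
    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
    // Then the procedure-specific body, e.g. the mount status.
    xdr.writeInt(0);  // MNT_OK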

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java Thu Oct 17 05:32:42 2013
@@ -149,6 +149,8 @@ public class IdUserGroup {
     checkAndUpdateMaps();
     String uname = uidNameMap.get(uid);
     if (uname == null) {
+      LOG.warn("Can't find user name for uid " + uid
+          + ". Use default user name " + unknown);
       uname = unknown;
     }
     return uname;
@@ -158,6 +160,8 @@ public class IdUserGroup {
     checkAndUpdateMaps();
     String gname = gidNameMap.get(gid);
     if (gname == null) {
+      LOG.warn("Can't find group name for gid " + gid
+          + ". Use default group name " + unknown);
       gname = unknown;
     }
     return gname;

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java Thu Oct 17 05:32:42 2013
@@ -19,15 +19,11 @@ package org.apache.hadoop.nfs.nfs3;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mount.MountdBase;
-import org.apache.hadoop.oncrpc.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
-import org.apache.hadoop.oncrpc.SimpleTcpServerHandler;
 import org.apache.hadoop.portmap.PortmapMapping;
-import org.jboss.netty.channel.ChannelPipeline;
-import org.jboss.netty.channel.ChannelPipelineFactory;
-import org.jboss.netty.channel.Channels;
 
 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgram}.
@@ -38,6 +34,7 @@ public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final MountdBase mountd;
   private final RpcProgram rpcProgram;
+  private final int nfsPort;
   
   public MountdBase getMountBase() {
     return mountd;
@@ -47,9 +44,17 @@ public abstract class Nfs3Base {
     return rpcProgram;
   }
 
+  protected Nfs3Base(MountdBase mountd, RpcProgram program, Configuration conf) {
+    this.mountd = mountd;
+    this.rpcProgram = program;
+    this.nfsPort = conf.getInt("nfs3.server.port", Nfs3Constant.PORT);
+    LOG.info("NFS server port set to: " + nfsPort);
+  }
+
   protected Nfs3Base(MountdBase mountd, RpcProgram program) {
     this.mountd = mountd;
     this.rpcProgram = program;
+    this.nfsPort = Nfs3Constant.PORT;
   }
 
   public void start(boolean register) {
@@ -61,19 +66,8 @@ public abstract class Nfs3Base {
   }
 
   private void startTCPServer() {
-    SimpleTcpServer tcpServer = new SimpleTcpServer(Nfs3Constant.PORT,
-        rpcProgram, 0) {
-      @Override
-      public ChannelPipelineFactory getPipelineFactory() {
-        return new ChannelPipelineFactory() {
-          @Override
-          public ChannelPipeline getPipeline() {
-            return Channels.pipeline(new RpcFrameDecoder(),
-                new SimpleTcpServerHandler(rpcProgram));
-          }
-        };
-      }
-    };
+    SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort,
+        rpcProgram, 0);
     tcpServer.run();
   }
 }
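
With the new constructor, a subclass can source its TCP port from
configuration instead of the compile-time constant. A minimal sketch (the
subclass name is hypothetical, for illustration only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mount.MountdBase;
    import org.apache.hadoop.nfs.nfs3.Nfs3Base;
    import org.apache.hadoop.oncrpc.RpcProgram;

    class ConfigurableNfs3 extends Nfs3Base {
      ConfigurableNfs3(MountdBase mountd, RpcProgram program,
          Configuration conf) {
        // Reads nfs3.server.port, falling back to Nfs3Constant.PORT.
        super(mountd, program, conf);
      }
    }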

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java Thu Oct 17 05:32:42 2013
@@ -72,19 +72,18 @@ public class Nfs3FileAttributes {
   }
    
   public Nfs3FileAttributes() {
-    this(false, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
+    this(NfsFileType.NFSREG, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
   }
 
-  public Nfs3FileAttributes(boolean isDir, int nlink, short mode, int uid,
+  public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid,
       int gid, long size, long fsid, long fileid, long mtime, long atime) {
-    this.type = isDir ? NfsFileType.NFSDIR.toValue() : NfsFileType.NFSREG
-        .toValue();
+    this.type = nfsType.toValue();
     this.mode = mode;
-    this.nlink = isDir ? (nlink + 2) : 1;
+    this.nlink = (type == NfsFileType.NFSDIR.toValue()) ? (nlink + 2) : 1;
     this.uid = uid;
     this.gid = gid;
     this.size = size;
-    if(isDir) {
+    if(type == NfsFileType.NFSDIR.toValue()) {
       this.size = getDirSize(nlink);
     }
     this.used = this.size;
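
The boolean-to-enum change also makes special file types such as symlinks
representable, which the old isDir flag could not express. A sketch with
placeholder attribute values (the NfsFileType package is assumed to be
org.apache.hadoop.nfs):

    import org.apache.hadoop.nfs.NfsFileType;
    import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;

    // Before: new Nfs3FileAttributes(true, ...) could only say file-or-dir.
    // Now the type is explicit, so NFSLNK (symlink) is expressible as well.
    Nfs3FileAttributes dirAttr = new Nfs3FileAttributes(NfsFileType.NFSDIR,
        2, (short) 0755, 0, 0, 0, 1, 100, 0, 0);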

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java Thu Oct 17 05:32:42 2013
@@ -97,6 +97,6 @@ public interface Nfs3Interface {
       InetAddress client);
 
   /** COMMIT: Commit cached data on a server to stable storage */
-  public NFS3Response commit(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response commit(XDR xdr, Channel channel, int xid,
+      SecurityHandler securityHandler, InetAddress client);
 }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java Thu Oct 17 05:32:42 2013
@@ -25,9 +25,9 @@ import org.apache.hadoop.oncrpc.XDR;
  * SYMLINK3 Request
  */
 public class SYMLINK3Request extends RequestWithHandle {
-  private final String name;
+  private final String name;     // The name of the link
   private final SetAttr3 symAttr;
-  private final String symData;
+  private final String symData;  // The target path of the symlink
   
   public SYMLINK3Request(XDR xdr) throws IOException {
     super(xdr);

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java Thu Oct 17 05:32:42 2013
@@ -28,8 +28,8 @@ import org.apache.hadoop.oncrpc.XDR;
  * WRITE3 Request
  */
 public class WRITE3Request extends RequestWithHandle {
-  private final long offset;
-  private final int count;
+  private long offset;
+  private int count;
   private final WriteStableHow stableHow;
   private final ByteBuffer data;
 
@@ -54,10 +54,18 @@ public class WRITE3Request extends Reque
     return this.offset;
   }
 
+  public void setOffset(long offset) {
+    this.offset = offset;
+  }
+  
   public int getCount() {
     return this.count;
   }
 
+  public void setCount(int count) {
+    this.count = count;
+  }
+  
   public WriteStableHow getStableHow() {
     return this.stableHow;
   }
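
Making offset and count mutable lets a handler adjust a request in place, for
instance to drop the portion of a write that has already been persisted. This
is an assumed use case; the helper below is hypothetical:

    // Hypothetical helper: trim the already-persisted prefix of a write
    // using the new setters.
    static void trimOverlap(WRITE3Request req, long persistedUpTo) {
      long overlap = persistedUpTo - req.getOffset();
      if (overlap > 0 && overlap < req.getCount()) {
        req.setOffset(persistedUpTo);
        req.setCount((int) (req.getCount() - overlap));
      }
    }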

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.respo
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * ACCESS3 Response 
@@ -43,8 +44,8 @@ public class ACCESS3Response extends NFS
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);
     if (this.getStatus() == Nfs3Status.NFS3_OK) {

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.respo
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * COMMIT3 Response
@@ -47,8 +48,8 @@ public class COMMIT3Response extends NFS
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     fileWcc.serialize(out);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeLongAsHyper(verf);

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java Thu Oct 17 05:32:42 2013
@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHa
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * CREATE3 Response
@@ -55,8 +56,8 @@ public class CREATE3Response extends NFS
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeBoolean(true); // Handle follows
       objHandle.serialize(out);

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java Thu Oct 17 05:32:42 2013
@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.NfsTime;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * FSINFO3 Response
@@ -109,8 +110,8 @@ public class FSINFO3Response extends NFS
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);
 

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.respo
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * FSSTAT3 Response
@@ -90,8 +91,8 @@ public class FSSTAT3Response extends NFS
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     if (postOpAttr == null) {
       postOpAttr = new Nfs3FileAttributes();

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.respo
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * GETATTR3 Response
@@ -40,8 +41,8 @@ public class GETATTR3Response extends NF
   }
   
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       postOpAttr.serialize(out);
     }

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LOOKUP3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LOOKUP3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LOOKUP3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LOOKUP3Response.java Thu Oct 17 05:32:42 2013
@@ -23,6 +23,7 @@ import org.apache.hadoop.nfs.nfs3.FileHa
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * LOOKUP3 Response
@@ -61,8 +62,8 @@ public class LOOKUP3Response extends NFS
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (this.status == Nfs3Status.NFS3_OK) {
       fileHandle.serialize(out);
       out.writeBoolean(true); // Attribute follows

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKDIR3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKDIR3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKDIR3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKDIR3Response.java Thu Oct 17 05:32:42 2013
@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHa
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * MKDIR3 Response
@@ -55,8 +56,8 @@ public class MKDIR3Response extends NFS3
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeBoolean(true); // Handle follows
       objFileHandle.serialize(out);

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/NFS3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/NFS3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/NFS3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/NFS3Response.java Thu Oct 17 05:32:42 2013
@@ -19,11 +19,13 @@ package org.apache.hadoop.nfs.nfs3.respo
 
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
- * Abstract class for a NFSv3 response
+ * Base class for an NFSv3 response. This class and its subclasses contain
+ * the response from NFSv3 handlers.
  */
-abstract public class NFS3Response {
+public class NFS3Response {
   protected int status;
 
   public NFS3Response(int status) {
@@ -38,8 +40,13 @@ abstract public class NFS3Response {
     this.status = status;
   }
   
-  public XDR send(XDR out, int xid) {
-    RpcAcceptedReply.voidReply(out, xid);
+  /**
+   * Write the response, along with the RPC header (including the verifier),
+   * to the XDR.
+   */
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(xid, verifier);
+    reply.write(out);
     out.writeInt(this.getStatus());
     return out;
   }
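
Every response class touched by this commit overrides this same entry point,
so the caller-side pattern is uniform. A sketch (the xid is a placeholder;
VerifierNone is the verifier used by the mount responses above):

    import org.apache.hadoop.nfs.nfs3.Nfs3Status;
    import org.apache.hadoop.nfs.nfs3.response.NFS3Response;
    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.VerifierNone;

    // NFS3Response is no longer abstract, so a bare status-only reply can
    // be written directly; subclasses append their bodies after calling
    // super.writeHeaderAndResponse(...).
    NFS3Response response = new NFS3Response(Nfs3Status.NFS3_OK);
    XDR out = response.writeHeaderAndResponse(new XDR(), 42, new VerifierNone());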

Modified: hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/PATHCONF3Response.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/PATHCONF3Response.java?rev=1532967&r1=1532966&r2=1532967&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/PATHCONF3Response.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/PATHCONF3Response.java Thu Oct 17 05:32:42 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.respo
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * PATHCONF3 Response
@@ -77,8 +78,8 @@ public class PATHCONF3Response extends N
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);