Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/10/11 08:14:40 UTC

svn commit: r1396918 [1/3] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs/ hadoop-hdfs/src/ h...

Author: todd
Date: Thu Oct 11 06:14:26 2012
New Revision: 1396918

URL: http://svn.apache.org/viewvc?rev=1396918&view=rev
Log:
Merge trunk into branch

Added:
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
      - copied unchanged from r1396916, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSocketCache.java
      - copied unchanged from r1396916, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSocketCache.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
      - copied unchanged from r1396916, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
      - copied unchanged from r1396916, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
      - copied unchanged from r1396916, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
Removed:
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.h
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/expect.h
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_jni.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.h
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/webhdfs.h
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
Modified:
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
    hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1390199-1396916

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java Thu Oct 11 06:14:26 2012
@@ -261,7 +261,7 @@ public class HttpFSParametersProvider ex
     /**
      * Parameter name.
      */
-    public static final String NAME = "len";
+    public static final String NAME = "length";
 
     /**
      * Constructor.

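For context, this rename tracks HDFS-3999 (noted in the CHANGES.txt hunk below): the HttpFS OPEN operation now expects the WebHDFS-standard "length" query parameter instead of "len". A minimal libcurl sketch of the corrected request (host, port, path, and user name are illustrative placeholders, not taken from this patch):

    #include <curl/curl.h>

    int main(void)
    {
        CURL *curl = curl_easy_init();
        if (!curl)
            return 1;
        /* OPEN with a byte range; "length" replaces the old "len" */
        curl_easy_setopt(curl, CURLOPT_URL,
            "http://localhost:14000/webhdfs/v1/tmp/foo"
            "?op=OPEN&offset=1&length=2&user.name=hdfs");
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); /* follow redirects */
        CURLcode rc = curl_easy_perform(curl);
        curl_easy_cleanup(curl);
        return rc == CURLE_OK ? 0 : 1;
    }
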
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java Thu Oct 11 06:14:26 2012
@@ -24,6 +24,7 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.Writer;
@@ -146,6 +147,7 @@ public class TestHttpFSServer extends HF
     conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
              HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
     conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
+    conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
     File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
     os = new FileOutputStream(httpfsSite);
     conf.writeXml(os);
@@ -233,6 +235,31 @@ public class TestHttpFSServer extends HF
   @TestDir
   @TestJetty
   @TestHdfs
+  public void testOpenOffsetLength() throws Exception {
+    createHttpFSServer(false);
+
+    byte[] array = new byte[]{0, 1, 2, 3};
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path("/tmp"));
+    OutputStream os = fs.create(new Path("/tmp/foo"));
+    os.write(array);
+    os.close();
+
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    InputStream is = conn.getInputStream();
+    Assert.assertEquals(1, is.read());
+    Assert.assertEquals(2, is.read());
+    Assert.assertEquals(-1, is.read());
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
   public void testPutNoOperation() throws Exception {
     createHttpFSServer(false);
 

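The new testOpenOffsetLength test above writes the bytes {0, 1, 2, 3} and verifies that open with offset=1 and length=2 returns exactly {1, 2}. The same positional read through the libwebhdfs C API reworked later in this commit would look roughly like this sketch (assumes a reachable cluster; error handling trimmed):

    #include "hdfs.h"
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        hdfsFS fs = hdfsConnect("default", 0); /* resolve the NN from config */
        if (!fs)
            return 1;
        hdfsFile f = hdfsOpenFile(fs, "/tmp/foo", O_RDONLY, 0, 0, 0);
        if (!f) {
            hdfsDisconnect(fs);
            return 1;
        }
        char buf[2];
        /* positional read: 2 bytes starting at offset 1 -> expects {1, 2} */
        tSize n = hdfsPread(fs, f, 1, buf, sizeof(buf));
        printf("read %d bytes: %d %d\n", (int)n, buf[0], buf[1]);
        hdfsCloseFile(fs, f);
        hdfsDisconnect(fs);
        return 0;
    }
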
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 11 06:14:26 2012
@@ -137,6 +137,16 @@ Trunk (Unreleased)
     HDFS-3880. Use Builder to build RPC server in HDFS.
     (Brandon Li via suresh)
 
+    HDFS-2127. Add a test that ensures AccessControlExceptions contain
+    a full path. (Stephen Chu via eli)
+
+    HDFS-3995. Use DFSTestUtil.createFile() for file creation and 
+    writing in test cases. (Jing Zhao via suresh)
+
+    HDFS-3735. NameNode WebUI should allow sorting live datanode list by fields
+    Block Pool Used, Block Pool Used(%) and Failed Volumes.
+    (Brahma Reddy Battula via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -235,6 +245,31 @@ Release 2.0.3-alpha - Unreleased 
 
     HDFS-3939. NN RPC address cleanup. (eli)
 
+    HDFS-3373. Change DFSClient input stream socket cache to global static and add
+    a thread to cleanup expired cache entries. (John George via szetszwo)
+
+    HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
+    dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
+
+    HDFS-3996. Add debug log removed in HDFS-3873 back. (eli)
+
+    HDFS-3916. libwebhdfs (C client) code cleanups.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3813. Log error message if security and WebHDFS are enabled but
+    principal/keytab are not configured. (Stephen Chu via atm)
+
+    HDFS-3483. Better error message when hdfs fsck is run against a ViewFS
+    config. (Stephen Fritz via atm)
+
+    HDFS-3682. MiniDFSCluster#init should provide more info when it fails.
+    (todd via eli)
+
+    HDFS-4008. TestBalancerWithEncryptedTransfer needs a timeout. (eli)
+
+    HDFS-4007. Rehabilitate bit-rotted unit tests under
+    hadoop-hdfs-project/hadoop-hdfs/src/test/unit/ (Colin Patrick McCabe via todd)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -259,6 +294,31 @@ Release 2.0.3-alpha - Unreleased 
 
     HDFS-3964. Make NN log of fs.defaultFS debug rather than info. (eli)
 
+    HDFS-3992. Method org.apache.hadoop.hdfs.TestHftpFileSystem.tearDown()
+    sometimes throws NPEs. (Ivan A. Veselovsky via atm)
+
+    HDFS-3753. Tests don't run with native libraries.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-4000. TestParallelLocalRead fails with "input ByteBuffers
+    must be direct buffers". (Colin Patrick McCabe via eli)
+
+    HDFS-3999. HttpFS OPEN operation expects len parameter; it should be length. (tucu)
+
+    HDFS-4006. TestCheckpoint#testSecondaryHasVeryOutOfDateImage
+    occasionally fails due to unexpected exit. (todd via eli)
+
+    HDFS-4003. test-patch should build the common native libs before
+    running hdfs tests. (Colin Patrick McCabe via eli)
+
+    HDFS-4018. testMiniDFSClusterWithMultipleNN is missing some
+    cluster cleanup. (eli)
+
+    HDFS-4020. TestRBWBlockInvalidation may time out. (eli)
+
+    HDFS-4021. Misleading error message when resources are low on the NameNode.
+    (Christopher Conner via atm)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -792,6 +852,8 @@ Release 2.0.2-alpha - 2012-09-07 
     HDFS-3938. remove current limitations from HttpFS docs. (tucu)
 
     HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly. (tucu)
+
+    HDFS-3972. Trash emptier fails in secure HA cluster. (todd via eli)
  
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
@@ -1628,6 +1690,27 @@ Release 2.0.0-alpha - 05-23-2012
     
     HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
 
+Release 0.23.5 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7  (Trevor
+    Robinson via tgraves)
+
+    HDFS-3824. TestHftpDelegationToken fails intermittently with JDK7 (Trevor
+    Robinson via tgraves)
+
+    HDFS-3224. Bug in check for DN re-registration with different storage ID
+    (jlowe)
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1640,7 +1723,10 @@ Release 0.23.4 - UNRELEASED
 
   BUG FIXES
 
-Release 0.23.3 - UNRELEASED
+    HDFS-3831. Failure to renew tokens due to test-sources left in classpath
+    (jlowe via bobby)
+
+Release 0.23.3
 
   INCOMPATIBLE CHANGES
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt Thu Oct 11 06:14:26 2012
@@ -85,8 +85,8 @@ CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/confi
 
 add_dual_library(hdfs
     main/native/libhdfs/exception.c
-    main/native/libhdfs/hdfs.c
     main/native/libhdfs/jni_helper.c
+    main/native/libhdfs/hdfs.c
 )
 target_link_dual_libraries(hdfs
     ${JAVA_JVM_LIBRARY}

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt Thu Oct 11 06:14:26 2012
@@ -16,28 +16,21 @@
 # limitations under the License.
 #
 
-find_package(CURL)
-if (CURL_FOUND)
-    include_directories(${CURL_INCLUDE_DIRS})
-else (CURL_FOUND)
-    MESSAGE(STATUS "Failed to find CURL library.")
-endif (CURL_FOUND)
+find_package(CURL REQUIRED)
 
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
 "${CMAKE_SOURCE_DIR}/contrib/libwebhdfs/resources/")
-MESSAGE("CMAKE_MODULE_PATH IS: " ${CMAKE_MODULE_PATH})
 
-find_package(Jansson)
+find_package(Jansson REQUIRED)
 include_directories(${JANSSON_INCLUDE_DIR})
 
 add_dual_library(webhdfs
-    src/exception.c
     src/hdfs_web.c
-    src/hdfs_jni.c
-    src/jni_helper.c
     src/hdfs_http_client.c
     src/hdfs_http_query.c
     src/hdfs_json_parser.c
+    ../../main/native/libhdfs/exception.c
+    ../../main/native/libhdfs/jni_helper.c
 )
 target_link_dual_libraries(webhdfs
     ${JAVA_JVM_LIBRARY}
@@ -55,10 +48,6 @@ add_executable(test_libwebhdfs_ops
 )
 target_link_libraries(test_libwebhdfs_ops
     webhdfs
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )
 
 add_executable(test_libwebhdfs_read
@@ -66,10 +55,6 @@ add_executable(test_libwebhdfs_read
 )
 target_link_libraries(test_libwebhdfs_read
     webhdfs
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )
 
 add_executable(test_libwebhdfs_write
@@ -77,10 +62,6 @@ add_executable(test_libwebhdfs_write
 )
 target_link_libraries(test_libwebhdfs_write
     webhdfs
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )
 
 add_executable(test_libwebhdfs_threaded
@@ -88,8 +69,4 @@ add_executable(test_libwebhdfs_threaded
 )
 target_link_libraries(test_libwebhdfs_threaded
     webhdfs
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h Thu Oct 11 06:14:26 2012
@@ -21,8 +21,42 @@
 #ifndef _HDFS_HTTP_CLIENT_H_
 #define _HDFS_HTTP_CLIENT_H_
 
-#include "webhdfs.h"
-#include <curl/curl.h>
+#include "hdfs.h" /* for tSize */
+
+#include <pthread.h> /* for pthread_t */
+#include <unistd.h> /* for size_t */
+
+enum hdfsStreamType
+{
+    UNINITIALIZED = 0,
+    INPUT = 1,
+    OUTPUT = 2,
+};
+
+/**
+ * webhdfsBuffer - used to hold the data for read/write from/to the http connection
+ */
+typedef struct {
+    const char *wbuffer;  // The user's buffer for uploading
+    size_t remaining;     // Length of content
+    size_t offset;        // offset for reading
+    int openFlag;         // Whether hdfsOpenFile has been called on this handle
+    int closeFlag;        // Whether to close the http connection for writing
+    pthread_mutex_t writeMutex; // Synchronization between the curl and hdfsWrite threads
+    pthread_cond_t newwrite_or_close; // Transferring thread waits for this condition
+                                      // when there is no more content for transferring in the buffer
+    pthread_cond_t transfer_finish; // Condition used to indicate finishing transferring (one buffer)
+} webhdfsBuffer;
+
+struct webhdfsFileHandle {
+    char *absPath;
+    int bufferSize;
+    short replication;
+    tSize blockSize;
+    char *datanode;
+    webhdfsBuffer *uploadBuffer;
+    pthread_t connThread;
+};
 
 enum HttpHeader {
     GET,

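The webhdfsBuffer introduced above coordinates two threads: hdfsWrite() publishes a chunk and signals newwrite_or_close, and the curl transfer thread drains the buffer and signals transfer_finish when it is empty. An illustrative sketch of that handoff (not code from the patch; it only mirrors the fields declared above):

    #include <pthread.h>
    #include <string.h>

    /* producer side: roughly what hdfsWrite() does */
    static void bufferPut(webhdfsBuffer *b, const char *data, size_t len)
    {
        pthread_mutex_lock(&b->writeMutex);
        b->wbuffer = data;
        b->remaining = len;
        b->offset = 0;
        pthread_cond_signal(&b->newwrite_or_close); /* wake transfer thread */
        while (b->remaining > 0)                    /* wait until drained */
            pthread_cond_wait(&b->transfer_finish, &b->writeMutex);
        pthread_mutex_unlock(&b->writeMutex);
    }

    /* consumer side: roughly what the curl read callback does */
    static size_t bufferTake(webhdfsBuffer *b, char *out, size_t max)
    {
        size_t n;
        pthread_mutex_lock(&b->writeMutex);
        while (b->remaining == 0 && !b->closeFlag)
            pthread_cond_wait(&b->newwrite_or_close, &b->writeMutex);
        n = b->remaining < max ? b->remaining : max;
        memcpy(out, b->wbuffer + b->offset, n);
        b->offset += n;
        b->remaining -= n;
        if (b->remaining == 0)
            pthread_cond_signal(&b->transfer_finish); /* buffer drained */
        pthread_mutex_unlock(&b->writeMutex);
        return n;
    }
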
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c Thu Oct 11 06:14:26 2012
@@ -15,14 +15,76 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+#include "exception.h"
+#include "hdfs.h" /* for hdfsFileInfo */
+#include "hdfs_json_parser.h"
+
 #include <stdlib.h>
 #include <string.h>
 #include <ctype.h>
 #include <jansson.h>
-#include "hdfs_json_parser.h"
-#include "exception.h"
 
-hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat, int *numEntries, const char *operation); //Forward Declaration
+/**
+ * Exception information after calling JSON operations
+ */
+struct jsonException {
+  const char *exception;
+  const char *javaClassName;
+  const char *message;
+};
+
+static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
+                           int *numEntries, const char *operation);
+
+static void dotsToSlashes(char *str)
+{
+    for (; *str != '\0'; str++) {
+        if (*str == '.')
+            *str = '/';
+    }
+}
+
+int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
+                        const char *fmt, va_list ap)
+{
+    char *javaClassName = NULL;
+    int excErrno = EINTERNAL, shouldPrint = 0;
+    if (!exc) {
+        fprintf(stderr, "printJsonExceptionV: the jsonException is NULL\n");
+        return EINTERNAL;
+    }
+    javaClassName = strdup(exc->javaClassName);
+    if (!javaClassName) {
+        fprintf(stderr, "printJsonExceptionV: internal out of memory error\n");
+        return EINTERNAL;
+    }
+    dotsToSlashes(javaClassName);
+    getExceptionInfo(javaClassName, noPrintFlags, &excErrno, &shouldPrint);
+    free(javaClassName);
+    
+    if (shouldPrint) {
+        vfprintf(stderr, fmt, ap);
+        fprintf(stderr, " error:\n");
+        fprintf(stderr, "Exception: %s\nJavaClassName: %s\nMessage: %s\n",
+                exc->exception, exc->javaClassName, exc->message);
+    }
+    
+    free(exc);
+    return excErrno;
+}
+
+int printJsonException(struct jsonException *exc, int noPrintFlags,
+                       const char *fmt, ...)
+{
+    va_list ap;
+    int ret;
+    
+    va_start(ap, fmt);
+    ret = printJsonExceptionV(exc, noPrintFlags, fmt, ap);
+    va_end(ap);
+    return ret;
+}
 
 static hdfsFileInfo *json_parse_array(json_t *jobj, char *key, hdfsFileInfo *fileStat, int *numEntries, const char *operation) {
     int arraylen = json_array_size(jobj);                      //Getting the length of the array
@@ -88,12 +150,12 @@ int parseDELETE(char *response) {
     return (parseBoolean(response));
 }
 
-hdfs_exception_msg *parseJsonException(json_t *jobj) {
+struct jsonException *parseJsonException(json_t *jobj) {
     const char *key;
     json_t *value;
-    hdfs_exception_msg *exception = NULL;
+    struct jsonException *exception = NULL;
     
-    exception = (hdfs_exception_msg *) calloc(1, sizeof(hdfs_exception_msg));
+    exception = calloc(1, sizeof(*exception));
     if (!exception) {
         return NULL;
     }
@@ -117,7 +179,7 @@ hdfs_exception_msg *parseJsonException(j
     return exception;
 }
 
-hdfs_exception_msg *parseException(const char *content) {
+struct jsonException *parseException(const char *content) {
     if (!content) {
         return NULL;
     }
@@ -145,7 +207,9 @@ hdfs_exception_msg *parseException(const
     return NULL;
 }
 
-hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat, int *numEntries, const char *operation) {
+static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
+                                  int *numEntries, const char *operation)
+{
     const char *tempstr;
     const char *key;
     json_t *value;
@@ -196,9 +260,9 @@ hdfsFileInfo *parseJsonGFS(json_t *jobj,
                     fileStat = parseJsonGFS(value, &fileStat[0], numEntries, operation);
                 } else if (!strcmp(key,"RemoteException")) {
                     //Besides returning NULL, we also need to print the exception information
-                    hdfs_exception_msg *exception = parseJsonException(value);
+                    struct jsonException *exception = parseJsonException(value);
                     if (exception) {
-                        errno = printExceptionWeb(exception, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+                        errno = printJsonException(exception, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
                     }
                     
                     if(fileStat != NULL) {
@@ -234,9 +298,9 @@ int checkHeader(char *header, const char
         return 0;
     }
     if(!(strstr(header, responseCode)) || !(header = strstr(header, "Content-Length"))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
         }
         return 0;
     }
@@ -259,14 +323,14 @@ int parseOPEN(const char *header, const 
         return -1;
     }
     if(!(strstr(header,responseCode1) && strstr(header, responseCode2))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
             //if the exception is an IOException and it is because the offset is out of the range
             //do not print out the exception
             if (!strcasecmp(exc->exception, "IOException") && strstr(exc->message, "out of the range")) {
                 return 0;
             }
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (OPEN)");
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (OPEN)");
         }
         return -1;
     }
@@ -297,9 +361,9 @@ int checkIfRedirect(const char *const he
     }
     if(!(tempHeader = strstr(headerstr,responseCode))) {
         //process possible exception information
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
         }
         return 0;
     }
@@ -350,9 +414,9 @@ int parseDnWRITE(const char *header, con
         return 0;
     }
     if(!(strstr(header,responseCode))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (WRITE(DataNode))");
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (WRITE(DataNode))");
         }
         return 0;
     }
@@ -365,9 +429,9 @@ int parseDnAPPEND(const char *header, co
         return 0;
     }
     if(!(strstr(header, responseCode))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (APPEND(DataNode))");
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (APPEND(DataNode))");
         }
         return 0;
     }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h Thu Oct 11 06:14:26 2012
@@ -17,7 +17,23 @@
  */
 #ifndef _HDFS_JSON_PARSER_H_
 #define _HDFS_JSON_PARSER_H_
-#include "webhdfs.h"
+
+struct jsonException;
+
+/**
+ * Print out JSON exception information.
+ *
+ * @param exc             The exception information to print and free
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param fmt             Printf-style format list
+ * @param ...             Printf-style varargs
+ *
+ * @return                The POSIX error number associated with the exception
+ *                        object.
+ */
+int printJsonException(struct jsonException *exc, int noPrintFlags,
+                       const char *fmt, ...);
 
 int parseMKDIR(char *response);
 int parseRENAME(char *response);

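A hypothetical caller of the API declared above, mirroring the pattern the patch uses in hdfs_json_parser.c (parseException and PRINT_EXC_ALL are the patch's own helpers; reportRemoteException is an invented name for illustration):

    /* Returns 0 if the response body held no RemoteException, otherwise the
     * POSIX errno derived from it. printJsonException also frees exc. */
    static int reportRemoteException(const char *responseBody, const char *op)
    {
        struct jsonException *exc = parseException(responseBody);
        if (exc)
            return printJsonException(exc, PRINT_EXC_ALL,
                                      "Calling WEBHDFS (%s)", op);
        return 0;
    }
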
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c Thu Oct 11 06:14:26 2012
@@ -16,38 +16,63 @@
  * limitations under the License.
  */
 
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <jni.h>
-#include "webhdfs.h"
+#include "exception.h"
+#include "hdfs.h"
 #include "hdfs_http_client.h"
 #include "hdfs_http_query.h"
 #include "hdfs_json_parser.h"
 #include "jni_helper.h"
-#include "exception.h"
+
+#include <inttypes.h>
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
 #define HADOOP_HDFS_CONF        "org/apache/hadoop/hdfs/HdfsConfiguration"
 #define HADOOP_NAMENODE         "org/apache/hadoop/hdfs/server/namenode/NameNode"
 #define JAVA_INETSOCKETADDRESS  "java/net/InetSocketAddress"
 
-static void initFileinfo(hdfsFileInfo *fileInfo) {
-    if (fileInfo) {
-        fileInfo->mKind = kObjectKindFile;
-        fileInfo->mName = NULL;
-        fileInfo->mLastMod = 0;
-        fileInfo->mSize = 0;
-        fileInfo->mReplication = 0;
-        fileInfo->mBlockSize = 0;
-        fileInfo->mOwner = NULL;
-        fileInfo->mGroup = NULL;
-        fileInfo->mPermissions = 0;
-        fileInfo->mLastAccess = 0;
-    }
-}
+struct hdfsBuilder {
+    int forceNewInstance;
+    const char *nn;
+    tPort port;
+    const char *kerbTicketCachePath;
+    const char *userName;
+};
+
+/**
+ * The information required for accessing webhdfs,
+ * including the network address of the namenode and the user name
+ *
+ * Unlike the strings in hdfsBuilder, the strings in this structure are
+ * dynamically allocated.  This structure will not be freed until we disconnect
+ * from HDFS.
+ */
+struct hdfs_internal {
+    char *nn;
+    tPort port;
+    char *userName;
 
-static webhdfsBuffer *initWebHdfsBuffer() {
-    webhdfsBuffer *buffer = (webhdfsBuffer *) calloc(1, sizeof(webhdfsBuffer));
+    /**
+     * Working directory -- stored with a trailing slash.
+     */
+    char *workingDir;
+};
+
+/**
+ * The 'file-handle' to a file in hdfs.
+ */
+struct hdfsFile_internal {
+    struct webhdfsFileHandle* file;
+    enum hdfsStreamType type;
+    int flags;
+    tOffset offset;
+};
+
+static webhdfsBuffer *initWebHdfsBuffer(void)
+{
+    webhdfsBuffer *buffer = calloc(1, sizeof(*buffer));
     if (!buffer) {
         fprintf(stderr, "Fail to allocate memory for webhdfsBuffer.\n");
         return NULL;
@@ -107,49 +132,36 @@ static void freeWebhdfsBuffer(webhdfsBuf
 }
 
 static void freeWebFileHandle(struct webhdfsFileHandle * handle) {
-    if (handle) {
-        freeWebhdfsBuffer(handle->uploadBuffer);
-        if (handle->datanode) {
-            free(handle->datanode);
-        }
-        if (handle->absPath) {
-            free(handle->absPath);
-        }
-        free(handle);
-        handle = NULL;
-    }
+    if (!handle)
+        return;
+    freeWebhdfsBuffer(handle->uploadBuffer);
+    free(handle->datanode);
+    free(handle->absPath);
+    free(handle);
 }
 
 struct hdfsBuilder *hdfsNewBuilder(void)
 {
     struct hdfsBuilder *bld = calloc(1, sizeof(struct hdfsBuilder));
-    if (!bld) {
+    if (!bld)
         return NULL;
-    }
-    hdfsSetWorkingDirectory(bld, "/");
     return bld;
 }
 
 void hdfsFreeBuilder(struct hdfsBuilder *bld)
 {
-    if (bld && bld->workingDir) {
-        free(bld->workingDir);
-    }
     free(bld);
 }
 
 void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld)
 {
-    if (bld) {
-        bld->forceNewInstance = 1;
-    }
+    // We don't cache instances in libwebhdfs, so this is not applicable.
 }
 
 void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn)
 {
     if (bld) {
         bld->nn = nn;
-        bld->nn_jni = nn;
     }
 }
 
@@ -199,7 +211,7 @@ hdfsFS hdfsConnectNewInstance(const char
         return NULL;
     }
     hdfsBuilderSetForceNewInstance(bld);
-    return bld;
+    return hdfsBuilderConnect(bld);
 }
 
 hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port,
@@ -215,290 +227,356 @@ hdfsFS hdfsConnectAsUserNewInstance(cons
     return hdfsBuilderConnect(bld);
 }
 
-const char *hdfsBuilderToStr(const struct hdfsBuilder *bld,
-                             char *buf, size_t bufLen);
+static const char *maybeNull(const char *str)
+{
+    return str ? str : "(NULL)";
+}
+
+static const char *hdfsBuilderToStr(const struct hdfsBuilder *bld,
+                                    char *buf, size_t bufLen)
+{
+    snprintf(buf, bufLen, "nn=%s, port=%d, "
+             "kerbTicketCachePath=%s, userName=%s",
+             maybeNull(bld->nn), bld->port,
+             maybeNull(bld->kerbTicketCachePath), maybeNull(bld->userName));
+    return buf;
+}
+
+static void freeWebHdfsInternal(struct hdfs_internal *fs)
+{
+    if (fs) {
+        free(fs->nn);
+        free(fs->userName);
+        free(fs->workingDir);
+    }
+}
+
+static int retrieveDefaults(const struct hdfsBuilder *bld, tPort *port,
+                            char **nn)
+{
+    JNIEnv *env = 0;
+    jobject jHDFSConf = NULL, jAddress = NULL;
+    jstring jHostName = NULL;
+    jvalue jVal;
+    jthrowable jthr = NULL;
+    int ret = 0;
+    char buf[512];
+    
+    // TODO: can we do this without using JNI?  See HDFS-3917
+    env = getJNIEnv();
+    if (!env) {
+        return EINTERNAL;
+    }
+    
+    //  jHDFSConf = new HDFSConfiguration();
+    jthr = constructNewObjectOfClass(env, &jHDFSConf, HADOOP_HDFS_CONF, "()V");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    
+    jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_NAMENODE, "getHttpAddress",
+                        "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/InetSocketAddress;",
+                        jHDFSConf);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                        "hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    jAddress = jVal.l;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    *port = jVal.i;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_INETSOCKETADDRESS, "getHostName", "()Ljava/lang/String;");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    jHostName = jVal.l;
+    jthr = newCStr(env, jHostName, nn);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+
+done:
+    destroyLocalReference(env, jHDFSConf);
+    destroyLocalReference(env, jAddress);
+    destroyLocalReference(env, jHostName);
+    return ret;
+}
 
 hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
 {
+    struct hdfs_internal *fs = NULL;
+    int ret;
+
     if (!bld) {
-        return NULL;
+        ret = EINVAL;
+        goto done;
     }
-    // if the hostname is null for the namenode, set it to localhost
-    //only handle bld->nn
     if (bld->nn == NULL) {
-        bld->nn = "localhost";
-    } else {
-        /* check whether the hostname of the namenode (nn in hdfsBuilder) has already contained the port */
-        const char *lastColon = rindex(bld->nn, ':');
-        if (lastColon && (strspn(lastColon + 1, "0123456789") == strlen(lastColon + 1))) {
-            fprintf(stderr, "port %d was given, but URI '%s' already "
-                    "contains a port!\n", bld->port, bld->nn);
-            char *newAddr = (char *)malloc(strlen(bld->nn) - strlen(lastColon) + 1);
-            if (!newAddr) {
-                return NULL;
-            }
-            strncpy(newAddr, bld->nn, strlen(bld->nn) - strlen(lastColon));
-            newAddr[strlen(bld->nn) - strlen(lastColon)] = '\0';
-            free(bld->nn);
-            bld->nn = newAddr;
-        }
+        // In the JNI version of libhdfs this returns a LocalFileSystem.
+        ret = ENOTSUP;
+        goto done;
     }
     
-    /* if the namenode is "default" and/or the port of namenode is 0, get the default namenode/port by using JNI */
+    fs = calloc(1, sizeof(*fs));
+    if (!fs) {
+        ret = ENOMEM;
+        goto done;
+    }
+    /* If the namenode is "default" and/or the port of namenode is 0, get the
+     * default namenode/port */
     if (bld->port == 0 || !strcasecmp("default", bld->nn)) {
-        JNIEnv *env = 0;
-        jobject jHDFSConf = NULL, jAddress = NULL;
-        jvalue jVal;
-        jthrowable jthr = NULL;
-        int ret = 0;
-        char buf[512];
-        
-        //Get the JNIEnv* corresponding to current thread
-        env = getJNIEnv();
-        if (env == NULL) {
-            errno = EINTERNAL;
-            free(bld);
-            bld = NULL;
-            return NULL;
-        }
-        
-        //  jHDFSConf = new HDFSConfiguration();
-        jthr = constructNewObjectOfClass(env, &jHDFSConf, HADOOP_HDFS_CONF, "()V");
-        if (jthr) {
-            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                        "hdfsBuilderConnect(%s)",
-                                        hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        ret = retrieveDefaults(bld, &fs->port, &fs->nn);
+        if (ret)
+            goto done;
+    } else {
+        fs->port = bld->port;
+        fs->nn = strdup(bld->nn);
+        if (!fs->nn) {
+            ret = ENOMEM;
             goto done;
         }
-        
-        jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_NAMENODE, "getHttpAddress",
-                            "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/InetSocketAddress;",
-                            jHDFSConf);
-        if (jthr) {
-            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                            "hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
-            goto done; //free(bld), deleteReference for jHDFSConf
-        }
-        jAddress = jVal.l;
-        
-        if (bld->port == 0) {
-            jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-                                JAVA_INETSOCKETADDRESS, "getPort", "()I");
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            bld->port = jVal.i;
-        }
-        
-        if (!strcasecmp("default", bld->nn)) {
-            jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-                                JAVA_INETSOCKETADDRESS, "getHostName", "()Ljava/lang/String;");
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            bld->nn = (const char*) ((*env)->GetStringUTFChars(env, jVal.l, NULL));
-        }
-        
-    done:
-        destroyLocalReference(env, jHDFSConf);
-        destroyLocalReference(env, jAddress);
-        if (ret) { //if there is error/exception, we free the builder and return NULL
-            free(bld);
-            bld = NULL;
+    }
+    if (bld->userName) {
+        // userName may be NULL
+        fs->userName = strdup(bld->userName);
+        if (!fs->userName) {
+            ret = ENOMEM;
+            goto done;
         }
     }
-    
+    // The working directory starts out as root.
+    fs->workingDir = strdup("/");
+    if (!fs->workingDir) {
+        ret = ENOMEM;
+        goto done;
+    }
     //for debug
     fprintf(stderr, "namenode: %s:%d\n", bld->nn, bld->port);
-    return bld;
+
+done:
+    free(bld);
+    if (ret) {
+        freeWebHdfsInternal(fs);
+        errno = ret;
+        return NULL;
+    }
+    return fs;
 }
 
 int hdfsDisconnect(hdfsFS fs)
 {
     if (fs == NULL) {
-        errno = EBADF;
+        errno = EINVAL;
         return -1;
-    } else {
-        free(fs);
-        fs = NULL;
     }
+    freeWebHdfsInternal(fs);
     return 0;
 }
 
-char *getAbsolutePath(hdfsFS fs, const char *path) {
-    if (fs == NULL || path == NULL) {
-        return NULL;
-    }
+static char *getAbsolutePath(hdfsFS fs, const char *path)
+{
     char *absPath = NULL;
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
+    size_t absPathLen;
     
-    if ('/' != *path && bld->workingDir) {
-        absPath = (char *)malloc(strlen(bld->workingDir) + strlen(path) + 1);
-        if (!absPath) {
-            return NULL;
-        }
-        absPath = strcpy(absPath, bld->workingDir);
-        absPath = strcat(absPath, path);
-        return absPath;
-    } else {
-        absPath = (char *)malloc(strlen(path) + 1);
-        if (!absPath) {
-            return NULL;
-        }
-        absPath = strcpy(absPath, path);
-        return absPath;
+    if (path[0] == '/') {
+        // path is already absolute.
+        return strdup(path);
+    }
+    // prepend the workingDir to the path.
+    absPathLen = strlen(fs->workingDir) + strlen(path);
+    absPath = malloc(absPathLen + 1);
+    if (!absPath) {
+        return NULL;
     }
+    snprintf(absPath, absPathLen + 1, "%s%s", fs->workingDir, path);
+    return absPath;
 }
 
 int hdfsCreateDirectory(hdfsFS fs, const char* path)
 {
+    char *url = NULL, *absPath = NULL;
+    Response resp = NULL;
+    int ret = 0;
+
     if (fs == NULL || path == NULL) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
-    
-    char *absPath = getAbsolutePath(fs, path);
+    absPath = getAbsolutePath(fs, path);
     if (!absPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareMKDIR(bld->nn, bld->port, absPath, bld->userName))
+    if(!((url = prepareMKDIR(fs->nn, fs->port, absPath, fs->userName))
          && (resp = launchMKDIR(url))
          && (parseMKDIR(resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
     
+done:
     freeResponse(resp);
     free(url);
     free(absPath);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsChmod(hdfsFS fs, const char* path, short mode)
 {
+    char *absPath = NULL, *url = NULL;
+    Response resp = NULL;
+    int ret = 0;
+
     if (fs == NULL || path == NULL) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
-    
-    char *absPath = getAbsolutePath(fs, path);
+    absPath = getAbsolutePath(fs, path);
     if (!absPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url=NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareCHMOD(bld->nn, bld->port, absPath, (int)mode, bld->userName))
+    if(!((url = prepareCHMOD(fs->nn, fs->port, absPath, (int)mode, fs->userName))
          && (resp = launchCHMOD(url))
          && (parseCHMOD(resp->header->content, resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
-    
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
 {
+    int ret = 0;
+    char *absPath = NULL, *url = NULL;
+    Response resp = NULL;
+
     if (fs == NULL || path == NULL) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
     
-    char *absPath = getAbsolutePath(fs, path);
+    absPath = getAbsolutePath(fs, path);
     if (!absPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url=NULL;
-    Response resp = NULL;
-    int ret = 0;
     
-    if(!((url = prepareCHOWN(bld->nn, bld->port, absPath, owner, group, bld->userName))
+    if(!((url = prepareCHOWN(fs->nn, fs->port, absPath, owner, group, fs->userName))
          && (resp = launchCHOWN(url))
          && (parseCHOWN(resp->header->content, resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
     
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
 {
+    char *oldAbsPath = NULL, *newAbsPath = NULL, *url = NULL;
+    int ret = 0;
+    Response resp = NULL;
+
     if (fs == NULL || oldPath == NULL || newPath == NULL) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
-    
-    char *oldAbsPath = getAbsolutePath(fs, oldPath);
+    oldAbsPath = getAbsolutePath(fs, oldPath);
     if (!oldAbsPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    char *newAbsPath = getAbsolutePath(fs, newPath);
+    newAbsPath = getAbsolutePath(fs, newPath);
     if (!newAbsPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url=NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareRENAME(bld->nn, bld->port, oldAbsPath, newAbsPath, bld->userName))
+    if(!((url = prepareRENAME(fs->nn, fs->port, oldAbsPath, newAbsPath, fs->userName))
          && (resp = launchRENAME(url))
          && (parseRENAME(resp->body->content)))) {
         ret = -1;
     }
-    
+done:
     freeResponse(resp);
     free(oldAbsPath);
     free(newAbsPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
 {
-    if (fs == NULL || path == NULL) {
-        return NULL;
-    }
-    
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return NULL;
-    }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
+    char *absPath = NULL;
     char *url=NULL;
     Response resp = NULL;
     int numEntries = 0;
     int ret = 0;
-    
-    hdfsFileInfo * fileInfo = (hdfsFileInfo *) calloc(1, sizeof(hdfsFileInfo));
+    hdfsFileInfo *fileInfo = NULL;
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    fileInfo = (hdfsFileInfo *) calloc(1, sizeof(hdfsFileInfo));
     if (!fileInfo) {
-        ret = -1;
+        ret = ENOMEM;
         goto done;
     }
-    initFileinfo(fileInfo);
-    
-    if(!((url = prepareGFS(bld->nn, bld->port, absPath, bld->userName))
+    fileInfo->mKind = kObjectKindFile;
+
+    if(!((url = prepareGFS(fs->nn, fs->port, absPath, fs->userName))
          && (resp = launchGFS(url))
          && (fileInfo = parseGFS(resp->body->content, fileInfo, &numEntries))))  {
-        ret = -1;
+        ret = EIO;
         goto done;
     }
     
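    /* Illustrative sketch (not part of the patch): the rewritten functions
     * above all follow the same error-handling convention -- collect a POSIX
     * error code in ret, funnel every exit through a done label for cleanup,
     * and translate into errno / -1 at the end.
     */
    int exampleOp(hdfsFS fs, const char *path)
    {
        char *absPath = NULL;
        int ret = 0;

        if (!fs || !path) {
            ret = EINVAL;
            goto done;
        }
        absPath = getAbsolutePath(fs, path);
        if (!absPath) {
            ret = ENOMEM;
            goto done;
        }
        /* ... build the URL and perform the HTTP request here ... */
    done:
        free(absPath);
        if (ret) {
            errno = ret;
            return -1;
        }
        return 0;
    }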
@@ -511,163 +589,172 @@ done:
         return fileInfo;
     } else {
         free(fileInfo);
+        errno = ret;
         return NULL;
     }
 }
 
 hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
 {
+    char *url = NULL, *absPath = NULL;
+    Response resp = NULL;
+    int ret = 0;
+    hdfsFileInfo *fileInfo = NULL;
+
     if (fs == NULL || path == NULL) {
-        return NULL;
+        ret = EINVAL;
+        goto done;
     }
-    
-    char *absPath = getAbsolutePath(fs, path);
+    absPath = getAbsolutePath(fs, path);
     if (!absPath) {
-        return NULL;
+        ret = ENOMEM;
+        goto done;
     }
-
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    hdfsFileInfo * fileInfo = (hdfsFileInfo *) calloc(1, sizeof(hdfsFileInfo));
+    fileInfo = calloc(1, sizeof(*fileInfo));
     if (!fileInfo) {
-        ret = -1;
+        ret = ENOMEM;
         goto done;
     }
-    
-    if(!((url = prepareLS(bld->nn, bld->port, absPath, bld->userName))
+    if(!((url = prepareLS(fs->nn, fs->port, absPath, fs->userName))
          && (resp = launchLS(url))
          && (fileInfo = parseGFS(resp->body->content, fileInfo, numEntries))))  {
-        ret = -1;
+        ret = EIO;
         goto done;
     }
-    
 done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    
+
     if (ret == 0) {
         return fileInfo;
     } else {
-        free(fileInfo);
+        if (fileInfo) {
+            hdfsFreeFileInfo(fileInfo, 1);
+        }
+        errno = ret;
         return NULL;
     }
 }
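
Note also the switch from a bare free(fileInfo) to hdfsFreeFileInfo() on the
failure path: parseGFS may already have populated mName, mOwner, and mGroup,
which a plain free() would leak. Typical use of the listing API, as a rough
sketch (the directory name is hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include "hdfs.h"

    static int listExample(hdfsFS fs)
    {
        int i, numEntries = 0;
        hdfsFileInfo *entries = hdfsListDirectory(fs, "/user/example",
                                                  &numEntries);
        if (!entries) {
            fprintf(stderr, "hdfsListDirectory: %s\n", strerror(errno));
            return -1;
        }
        for (i = 0; i < numEntries; i++) {
            printf("%s\t%lld bytes\n", entries[i].mName,
                   (long long)entries[i].mSize);
        }
        hdfsFreeFileInfo(entries, numEntries); // frees the strings too
        return 0;
    }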
 
 int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
 {
+    char *url = NULL, *absPath = NULL;
+    Response resp = NULL;
+    int ret = 0;
+
     if (fs == NULL || path == NULL) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
-    char *absPath = getAbsolutePath(fs, path);
+    absPath = getAbsolutePath(fs, path);
     if (!absPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareSETREPLICATION(bld->nn, bld->port, absPath, replication, bld->userName))
+    if(!((url = prepareSETREPLICATION(fs->nn, fs->port, absPath, replication, fs->userName))
          && (resp = launchSETREPLICATION(url))
          && (parseSETREPLICATION(resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
-    
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
 {
-    //Free the mName, mOwner, and mGroup
     int i;
+
     for (i=0; i < numEntries; ++i) {
-        if (hdfsFileInfo[i].mName) {
-            free(hdfsFileInfo[i].mName);
-        }
-        if (hdfsFileInfo[i].mOwner) {
-            free(hdfsFileInfo[i].mOwner);
-        }
-        if (hdfsFileInfo[i].mGroup) {
-            free(hdfsFileInfo[i].mGroup);
-        }
+        free(hdfsFileInfo[i].mName);
+        free(hdfsFileInfo[i].mOwner);
+        free(hdfsFileInfo[i].mGroup);
     }
-    
-    //Free entire block
     free(hdfsFileInfo);
-    hdfsFileInfo = NULL;
 }
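
The per-field NULL checks could be dropped because the C standard defines
free(NULL) as a no-op, so the guard is built into free() itself. The same
guarantee is what lets the done: blocks above release absPath and url
unconditionally:

    char *absPath = NULL, *url = NULL; // never assigned on early-exit paths
    free(absPath);                     // well-defined: free(NULL) does nothing
    free(url);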
 
 int hdfsDelete(hdfsFS fs, const char* path, int recursive)
 {
+    char *url = NULL, *absPath = NULL;
+    Response resp = NULL;
+    int ret = 0;
+
     if (fs == NULL || path == NULL) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
-    char *absPath = getAbsolutePath(fs, path);
+    absPath = getAbsolutePath(fs, path);
     if (!absPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareDELETE(bld->nn, bld->port, absPath, recursive, bld->userName))
+    if(!((url = prepareDELETE(fs->nn, fs->port, absPath, recursive, fs->userName))
          && (resp = launchDELETE(url))
          && (parseDELETE(resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
     
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
 {
+    char *url = NULL, *absPath = NULL;
+    Response resp = NULL;
+    int ret = 0;
+
     if (fs == NULL || path == NULL) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
-    char *absPath = getAbsolutePath(fs, path);
+    absPath = getAbsolutePath(fs, path);
     if (!absPath) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareUTIMES(bld->nn, bld->port, absPath, mtime, atime, bld->userName))
+    if(!((url = prepareUTIMES(fs->nn, fs->port, absPath, mtime, atime,
+                              fs->userName))
          && (resp = launchUTIMES(url))
          && (parseUTIMES(resp->header->content, resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
     
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsExists(hdfsFS fs, const char *path)
 {
     hdfsFileInfo *fileInfo = hdfsGetPathInfo(fs, path);
-    if (fileInfo) {
-        hdfsFreeFileInfo(fileInfo, 1);
-        return 0;
-    } else {
+    if (!fileInfo) {
+        // (errno will have been set by hdfsGetPathInfo)
         return -1;
     }
+    hdfsFreeFileInfo(fileInfo, 1);
+    return 0;
 }
 
 typedef struct {
@@ -701,39 +788,160 @@ static void *writeThreadOperation(void *
     return data;
 }
 
+/**
+ * Free the memory associated with a webHDFS file handle.
+ *
+ * No other resources will be freed.
+ *
+ * @param file            The webhdfs file handle
+ */
+static void freeFileInternal(hdfsFile file)
+{
+    if (!file)
+        return;
+    freeWebFileHandle(file->file);
+    free(file);
+}
+
+/**
+ * Helper function for opening a file for OUTPUT.
+ *
+ * As part of the open process for OUTPUT files, we have to connect to the
+ * NameNode and get the URL of the corresponding DataNode.
+ * We also create a background thread here for doing I/O.
+ *
+ * @param fs                     The filesystem handle
+ * @param file                   The file handle being opened for OUTPUT
+ * @return                       0 on success; error code otherwise
+ */
+static int hdfsOpenOutputFileImpl(hdfsFS fs, hdfsFile file)
+{
+    struct webhdfsFileHandle *webhandle = file->file;
+    Response resp = NULL;
+    int parseRet, append, ret = 0;
+    char *prepareUrl = NULL, *dnUrl = NULL;
+    threadData *data = NULL;
+
+    webhandle->uploadBuffer = initWebHdfsBuffer();
+    if (!webhandle->uploadBuffer) {
+        ret = ENOMEM;
+        goto done;
+    }
+    append = file->flags & O_APPEND;
+    if (!append) {
+        // If we're not appending, send a create request to the NN
+        prepareUrl = prepareNnWRITE(fs->nn, fs->port, webhandle->absPath,
+            fs->userName, webhandle->replication, webhandle->blockSize);
+    } else {
+        prepareUrl = prepareNnAPPEND(fs->nn, fs->port, webhandle->absPath,
+                              fs->userName);
+    }
+    if (!prepareUrl) {
+        fprintf(stderr, "fail to create the url connecting to namenode "
+                "for file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    if (!append) {
+        resp = launchNnWRITE(prepareUrl);
+    } else {
+        resp = launchNnAPPEND(prepareUrl);
+    }
+    if (!resp) {
+        fprintf(stderr, "fail to get the response from namenode for "
+                "file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    if (!append) {
+        parseRet = parseNnWRITE(resp->header->content, resp->body->content);
+    } else {
+        parseRet = parseNnAPPEND(resp->header->content, resp->body->content);
+    }
+    if (!parseRet) {
+        fprintf(stderr, "fail to parse the response from namenode for "
+                "file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    dnUrl = parseDnLoc(resp->header->content);
+    if (!dnUrl) {
+        fprintf(stderr, "fail to get the datanode url from namenode "
+                "for file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    //store the datanode url in the file handle
+    webhandle->datanode = strdup(dnUrl);
+    if (!webhandle->datanode) {
+        ret = ENOMEM;
+        goto done;
+    }
+    //create a new thread for performing the http transferring
+    data = calloc(1, sizeof(*data));
+    if (!data) {
+        ret = ENOMEM;
+        goto done;
+    }
+    data->url = strdup(dnUrl);
+    if (!data->url) {
+        ret = ENOMEM;
+        goto done;
+    }
+    data->flags = file->flags;
+    data->uploadBuffer = webhandle->uploadBuffer;
+    ret = pthread_create(&webhandle->connThread, NULL,
+                         writeThreadOperation, data);
+    if (ret) {
+        fprintf(stderr, "Failed to create the writing thread.\n");
+        goto done;
+    }
+    webhandle->uploadBuffer->openFlag = 1;
+
+done:
+    freeResponse(resp);
+    free(prepareUrl);
+    free(dnUrl);
+    if (ret && data) { // data may still be NULL if its allocation failed
+        free(data->url);
+        free(data);
+    }
+    return ret;
+}
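
For context, a webhdfs create or append is a two-step exchange: the client
first contacts the NameNode (prepareNnWRITE/prepareNnAPPEND above), the
NameNode redirects it to a DataNode via the Location header (parseDnLoc),
and the background thread then streams data to that DataNode URL. None of
this shows at the API surface; a write is simply the rough sketch below
(the path and payload are hypothetical):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include "hdfs.h"

    static int writeExample(hdfsFS fs)
    {
        const char msg[] = "hello, webhdfs\n";
        // Zero bufferSize/replication/blockSize selects the defaults.
        hdfsFile f = hdfsOpenFile(fs, "/user/example/out.txt",
                                  O_WRONLY | O_CREAT, 0, 0, 0);
        if (!f) {
            fprintf(stderr, "open failed: %s\n", strerror(errno));
            return -1;
        }
        if (hdfsWrite(fs, f, msg, sizeof(msg) - 1) < 0) {
            fprintf(stderr, "write failed: %s\n", strerror(errno));
            hdfsCloseFile(fs, f);
            return -1;
        }
        return hdfsCloseFile(fs, f);
    }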
+
 hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                       int bufferSize, short replication, tSize blockSize)
 {
-    /*
-     * the original version of libhdfs based on JNI store a fsinputstream/fsoutputstream in the hdfsFile
-     * in libwebhdfs that is based on webhdfs, we store (absolute_path, buffersize, replication, blocksize) in it
-     */
+    int ret = 0;
+    int accmode = flags & O_ACCMODE;
+    struct webhdfsFileHandle *webhandle = NULL;
+    hdfsFile file = NULL;
+
     if (fs == NULL || path == NULL) {
-        return NULL;
+        ret = EINVAL;
+        goto done;
     }
-
-    int accmode = flags & O_ACCMODE;
     if (accmode == O_RDWR) {
+        // TODO: the original libhdfs has very hackish support for this; should
+        // we do the same?  It would actually be a lot easier in libwebhdfs
+        // since the protocol isn't connection-oriented. 
         fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
-        errno = ENOTSUP;
-        return NULL;
+        ret = ENOTSUP;
+        goto done;
     }
-    
     if ((flags & O_CREAT) && (flags & O_EXCL)) {
         fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
     }
-    
-    hdfsFile hdfsFileHandle = (hdfsFile) calloc(1, sizeof(struct hdfsFile_internal));
-    if (!hdfsFileHandle) {
-        return NULL;
+    file = calloc(1, sizeof(struct hdfsFile_internal));
+    if (!file) {
+        ret = ENOMEM;
+        goto done;
     }
-    int ret = 0;
-    hdfsFileHandle->flags = flags;
-    hdfsFileHandle->type = accmode == O_RDONLY ? INPUT : OUTPUT;
-    hdfsFileHandle->offset = 0;
-    struct webhdfsFileHandle *webhandle = (struct webhdfsFileHandle *) calloc(1, sizeof(struct webhdfsFileHandle));
+    file->flags = flags;
+    file->type = accmode == O_RDONLY ? INPUT : OUTPUT;
+    file->offset = 0;
+    webhandle = calloc(1, sizeof(struct webhdfsFileHandle));
     if (!webhandle) {
-        ret = -1;
+        ret = ENOMEM;
         goto done;
     }
     webhandle->bufferSize = bufferSize;
@@ -741,105 +949,28 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
     webhandle->blockSize = blockSize;
     webhandle->absPath = getAbsolutePath(fs, path);
     if (!webhandle->absPath) {
-        ret = -1;
+        ret = ENOMEM;
         goto done;
     }
-    hdfsFileHandle->file = webhandle;
-    
-    //for write/append, need to connect to the namenode
-    //and get the url of corresponding datanode
-    if (hdfsFileHandle->type == OUTPUT) {
-        webhandle->uploadBuffer = initWebHdfsBuffer();
-        if (!webhandle->uploadBuffer) {
-            ret = -1;
-            goto done;
-        }
-        struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-        char *url = NULL;
-        Response resp = NULL;
-        int append = flags & O_APPEND;
-        int create = append ? 0 : 1;
-        
-        //if create: send create request to NN
-        if (create) {
-            url = prepareNnWRITE(bld->nn, bld->port, webhandle->absPath, bld->userName, webhandle->replication, webhandle->blockSize);
-        } else if (append) {
-            url = prepareNnAPPEND(bld->nn, bld->port, webhandle->absPath, bld->userName);
-        }
-        if (!url) {
-            fprintf(stderr,
-                    "fail to create the url connecting to namenode for file creation/appending\n");
-            ret = -1;
+    file->file = webhandle;
+    if (file->type == OUTPUT) {
+        ret = hdfsOpenOutputFileImpl(fs, file);
+        if (ret) {
             goto done;
         }
+    }
 
-        if (create) {
-            resp = launchNnWRITE(url);
-        } else if (append) {
-            resp = launchNnAPPEND(url);
-        }
-        if (!resp) {
-            fprintf(stderr,
-                    "fail to get the response from namenode for file creation/appending\n");
-            free(url);
-            ret = -1;
-            goto done;
-        }
-        
-        int parseRet = 0;
-        if (create) {
-            parseRet = parseNnWRITE(resp->header->content, resp->body->content);
-        } else if (append) {
-            parseRet = parseNnAPPEND(resp->header->content, resp->body->content);
-        }
-        if (!parseRet) {
-            fprintf(stderr,
-                    "fail to parse the response from namenode for file creation/appending\n");
-            free(url);
-            freeResponse(resp);
-            ret = -1;
-            goto done;
-        }
-            
-        free(url);
-        url = parseDnLoc(resp->header->content);
-        if (!url) {
-            fprintf(stderr,
-                    "fail to get the datanode url from namenode for file creation/appending\n");
-            freeResponse(resp);
-            ret = -1;
-            return NULL;
-        }
-        freeResponse(resp);
-        //store the datanode url in the file handle
-        webhandle->datanode = strdup(url);
- 
-        //create a new thread for performing the http transferring
-        threadData *data = (threadData *) calloc(1, sizeof(threadData));
-        if (!data) {
-            ret = -1;
-            goto done;
-        }
-        data->url = strdup(url);
-        data->flags = flags;
-        data->uploadBuffer = webhandle->uploadBuffer;
-        free(url);
-        ret = pthread_create(&webhandle->connThread, NULL, writeThreadOperation, data);
-        if (ret) {
-            fprintf(stderr, "Failed to create the writing thread.\n");
+done:
+    if (ret) {
+        if (file) {
+            file->file = webhandle; // may not have been attached yet
+            freeFileInternal(file); // Also frees webhandle
         } else {
-            webhandle->uploadBuffer->openFlag = 1;
+            freeWebFileHandle(webhandle);
         }
-    }
-    
-done:
-    if (ret == 0) {
-        return hdfsFileHandle;
-    } else {
-        freeWebFileHandle(webhandle);
-        free(hdfsFileHandle);
+        errno = ret;
         return NULL;
     }
+    return file;
 }
 
 tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer, tSize length)
@@ -848,15 +979,17 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile file
         return 0;
     }
     if (fs == NULL || file == NULL || file->type != OUTPUT || length < 0) {
+        errno = EBADF;
         return -1;
     }
     
-    struct webhdfsFileHandle *wfile = (struct webhdfsFileHandle *) file->file;
+    struct webhdfsFileHandle *wfile = file->file;
     if (wfile->uploadBuffer && wfile->uploadBuffer->openFlag) {
         resetWebhdfsBuffer(wfile->uploadBuffer, buffer, length);
         return length;
     } else {
         fprintf(stderr, "Error: have not opened the file %s for writing yet.\n", wfile->absPath);
+        errno = EBADF;
         return -1;
     }
 }
@@ -868,7 +1001,7 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile fi
     if (file->type == OUTPUT) {
         void *respv;
         threadData *tdata;
-        struct webhdfsFileHandle *wfile = (struct webhdfsFileHandle *) file->file;
+        struct webhdfsFileHandle *wfile = file->file;
         pthread_mutex_lock(&(wfile->uploadBuffer->writeMutex));
         wfile->uploadBuffer->closeFlag = 1;
         pthread_cond_signal(&wfile->uploadBuffer->newwrite_or_close);
@@ -893,13 +1026,10 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile fi
         //free the threaddata
         freeThreadData(tdata);
     }
-    
-    fprintf(stderr, "To clean the webfilehandle...\n");
-    if (file) {
-        freeWebFileHandle(file->file);
-        free(file);
-        file = NULL;
-        fprintf(stderr, "Cleaned the webfilehandle...\n");
+    freeFileInternal(file);
+    fprintf(stderr, "Closed the webfilehandle...\n");
+    if (ret) {
+        errno = EIO;
     }
     return ret;
 }
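
The close path above is one half of a standard condition-variable shutdown
handshake: under writeMutex it sets closeFlag, signals newwrite_or_close so
the writer thread wakes up, then joins connThread. Reduced to a standalone
sketch (these names are illustrative, not the library's):

    #include <pthread.h>

    struct buf {
        pthread_mutex_t mu;
        pthread_cond_t  cv;
        int closed;
    };

    static void *writer(void *arg)
    {
        struct buf *b = arg;
        pthread_mutex_lock(&b->mu);
        while (!b->closed)              // wait for new data or close
            pthread_cond_wait(&b->cv, &b->mu);
        pthread_mutex_unlock(&b->mu);
        return NULL;                    // drain/flush would happen here
    }

    int main(void)
    {
        struct buf b = { PTHREAD_MUTEX_INITIALIZER,
                         PTHREAD_COND_INITIALIZER, 0 };
        pthread_t t;

        pthread_create(&t, NULL, writer, &b);
        pthread_mutex_lock(&b.mu);
        b.closed = 1;                   // like closeFlag = 1 above
        pthread_cond_signal(&b.cv);
        pthread_mutex_unlock(&b.mu);
        pthread_join(t, NULL);          // like joining connThread
        return 0;
    }
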
@@ -914,111 +1044,155 @@ int hdfsFileIsOpenForWrite(hdfsFile file
     return (file->type == OUTPUT);
 }
 
-tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length)
+static int hdfsReadImpl(hdfsFS fs, hdfsFile file, void* buffer, tSize off,
+                        tSize length, tSize *numRead)
 {
-    if (length == 0) {
-        return 0;
-    }
-    if (fs == NULL || file == NULL || file->type != INPUT || buffer == NULL || length < 0) {
-        errno = EINVAL;
-        return -1;
-    }
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    struct webhdfsFileHandle *webFile = (struct webhdfsFileHandle *) file->file;
+    int ret = 0;
     char *url = NULL;
     Response resp = NULL;
     int openResult = -1;
-    
-    resp = (Response) calloc(1, sizeof(*resp));
+
+    if (fs == NULL || file == NULL || file->type != INPUT || buffer == NULL ||
+            length < 0) {
+        ret = EINVAL;
+        goto done;
+    }
+    if (length == 0) {
+        // Special case: the user supplied a buffer of zero length, so there is
+        // nothing to do.
+        *numRead = 0;
+        goto done;
+    }
+    resp = calloc(1, sizeof(*resp)); // Response is a pointer typedef, so
+                                     // allocate the struct it points to
     if (!resp) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
     resp->header = initResponseBuffer();
     resp->body = initResponseBuffer();
     resp->body->content = buffer;
     resp->body->remaining = length;
     
-    if (!((url = prepareOPEN(bld->nn, bld->port, webFile->absPath, bld->userName, file->offset, length))
+    if (!((url = prepareOPEN(fs->nn, fs->port, file->file->absPath,
+                             fs->userName, off, length))
           && (resp = launchOPEN(url, resp))
           && ((openResult = parseOPEN(resp->header->content, resp->body->content)) > 0))) {
-        free(url);
-        freeResponseBuffer(resp->header);
         if (openResult == 0) {
-            return 0;
-        } else {
-            return -1;
+            // Special case: if parseOPEN returns 0, we asked for a byte range
+            // outside what the file contains.  In this case, hdfsRead and
+            // hdfsPread return 0, meaning end-of-file.
+            *numRead = 0;
+            goto done;
         }
+        ret = EIO;
+        goto done;
     }
-    
-    size_t readSize = resp->body->offset;
-    file->offset += readSize;
-    
+    *numRead = resp->body->offset;
+
+done:
     freeResponseBuffer(resp->header);
     free(resp->body);
     free(resp);
     free(url);
-    return readSize;
+    return ret;
 }
 
-int hdfsAvailable(hdfsFS fs, hdfsFile file)
+tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length)
 {
-    if (!file || !fs) {
-        return -1;
-    }
-    struct webhdfsFileHandle *wf = (struct webhdfsFileHandle *) file->file;
-    if (!wf) {
-        return -1;
-    }
-    hdfsFileInfo *fileInfo = hdfsGetPathInfo(fs, wf->absPath);
-    if (fileInfo) {
-        int available = (int)(fileInfo->mSize - file->offset);
-        hdfsFreeFileInfo(fileInfo, 1);
-        return available;
-    } else {
+    int ret;
+    tSize numRead = 0;
+
+    if (!file) {
+        errno = EINVAL;
+        return -1;
+    }
+    ret = hdfsReadImpl(fs, file, buffer, file->offset, length, &numRead);
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    file->offset += numRead;
+    return numRead;
+}
+
+int hdfsAvailable(hdfsFS fs, hdfsFile file)
+{
+    /* Reads from webhdfs currently always block, so the number of bytes
+     * that can be read without blocking is 0.
+     */
+    return 0;
+}
+
+int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+{
+    errno = ENOTSUP;
+    return -1;
 }
 
 int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos)
 {
-    if (!fs || !file || desiredPos < 0) {
-        return -1;
+    struct webhdfsFileHandle *wf;
+    hdfsFileInfo *fileInfo = NULL;
+    int ret = 0;
+
+    if (!fs || !file || (file->type == OUTPUT) || (desiredPos < 0)) {
+        ret = EINVAL;
+        goto done;
     }
-    struct webhdfsFileHandle *wf = (struct webhdfsFileHandle *) file->file;
+    wf = file->file;
     if (!wf) {
-        return -1;
+        ret = EINVAL;
+        goto done;
     }
-    hdfsFileInfo *fileInfo = hdfsGetPathInfo(fs, wf->absPath);
-    int ret = 0;
+    fileInfo = hdfsGetPathInfo(fs, wf->absPath);
+    if (!fileInfo) {
+        ret = errno;
+        goto done;
+    }
+    if (desiredPos > fileInfo->mSize) {
+        fprintf(stderr,
+                "hdfsSeek for %s failed since the desired position %" PRId64
+                " is beyond the size of the file %" PRId64 "\n",
+                wf->absPath, desiredPos, fileInfo->mSize);
+        ret = ENOTSUP;
+        goto done;
+    }
+    file->offset = desiredPos;
+
+done:
     if (fileInfo) {
-        if (fileInfo->mSize < desiredPos) {
-            errno = ENOTSUP;
-            fprintf(stderr,
-                    "hdfsSeek for %s failed since the desired position %lld is beyond the size of the file %lld\n",
-                    wf->absPath, desiredPos, fileInfo->mSize);
-            ret = -1;
-        } else {
-            file->offset = desiredPos;
-        }
         hdfsFreeFileInfo(fileInfo, 1);
-        return ret;
-    } else {
+    }
+    if (ret) {
+        errno = ret;
         return -1;
     }
+    return 0;
 }
 
 tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, void* buffer, tSize length)
 {
-    if (!fs || !file || file->type != INPUT || position < 0 || !buffer || length < 0) {
+    int ret;
+    tSize numRead = 0;
+
+    if (position < 0) {
+        errno = EINVAL;
+        return -1;
+    }
+    ret = hdfsReadImpl(fs, file, buffer, position, length, &numRead);
+    if (ret) {
+        errno = ret;
         return -1;
     }
-    file->offset = position;
-    return hdfsRead(fs, file, buffer, length);
+    return numRead;
 }
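
One behavioral change worth calling out: hdfsPread no longer moves the
cursor. The old version set file->offset = position and delegated to
hdfsRead; now both entry points share hdfsReadImpl, and only hdfsRead
advances the offset, matching POSIX pread semantics. A small sketch of
the difference:

    #include "hdfs.h"

    static void preadVsRead(hdfsFS fs, hdfsFile f)
    {
        char buf[64];

        // Reads at the current cursor, then advances file->offset by
        // the number of bytes returned.
        tSize n = hdfsRead(fs, f, buf, sizeof(buf));

        // Reads at an explicit position; the cursor is untouched.
        tSize m = hdfsPread(fs, f, 1024, buf, sizeof(buf));

        (void)n;
        (void)m;
    }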
 
 tOffset hdfsTell(hdfsFS fs, hdfsFile file)
 {
     if (!file) {
+        errno = EINVAL;
         return -1;
     }
     return file->offset;
@@ -1027,29 +1201,51 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile fil
 char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize)
 {
     if (fs == NULL || buffer == NULL ||  bufferSize <= 0) {
+        errno = EINVAL;
         return NULL;
     }
-    
-    struct hdfsBuilder * bld = (struct hdfsBuilder *) fs;
-    if (bld->workingDir) {
-        strncpy(buffer, bld->workingDir, bufferSize);
+    if (snprintf(buffer, bufferSize, "%s", fs->workingDir) >= bufferSize) {
+        errno = ENAMETOOLONG;
+        return NULL;
     }
     return buffer;
 }
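
The snprintf-based copy fixes a quiet flaw in the strncpy version: strncpy
neither NUL-terminates on truncation nor reports it. snprintf always
terminates and returns the length it would have needed, so a return value of
bufferSize or more signals a too-small buffer. For example:

    #include <stdio.h>
    #include "hdfs.h"

    static void cwdExample(hdfsFS fs)
    {
        char wd[4096];

        if (!hdfsGetWorkingDirectory(fs, wd, sizeof(wd))) {
            // errno is EINVAL for bad arguments, or ENAMETOOLONG if
            // wd[] could not hold the full path.
            perror("hdfsGetWorkingDirectory");
            return;
        }
        printf("cwd: %s\n", wd);
    }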
 
 int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
 {
+    char *newWorkingDir;
+    size_t strlenPath, newWorkingDirLen;
+
     if (fs == NULL || path == NULL) {
+        errno = EINVAL;
         return -1;
     }
-    
-    struct hdfsBuilder * bld = (struct hdfsBuilder *) fs;
-    free(bld->workingDir);
-    bld->workingDir = (char *)malloc(strlen(path) + 1);
-    if (!(bld->workingDir)) {
+    strlenPath = strlen(path);
+    if (strlenPath < 1) {
+        errno = EINVAL;
+        return -1;
+    }
+    if (path[0] != '/') {
+        // TODO: support non-absolute paths.  They should be interpreted
+        // relative to the current path.
+        errno = ENOTSUP;
+        return -1;
+    }
+    if (strstr(path, "//")) {
+        // TODO: support non-normalized paths (by normalizing them.)
+        errno = ENOTSUP;
         return -1;
     }
-    strcpy(bld->workingDir, path);
+    newWorkingDirLen = strlenPath + 2;
+    newWorkingDir = malloc(newWorkingDirLen);
+    if (!newWorkingDir) {
+        errno = ENOMEM;
+        return -1;
+    }
+    snprintf(newWorkingDir, newWorkingDirLen, "%s%s",
+             path, (path[strlenPath - 1] == '/') ? "" : "/");
+    free(fs->workingDir);
+    fs->workingDir = newWorkingDir;
     return 0;
 }
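
The rewritten setter also normalizes its input: it insists on an absolute,
already-normalized path and stores it with a trailing slash so that relative
names can be appended directly. Roughly:

    #include "hdfs.h"

    static void cwdSetExample(hdfsFS fs)
    {
        // Stored internally as "/user/example/"; the trailing slash
        // lets getAbsolutePath() concatenate relative names directly.
        hdfsSetWorkingDirectory(fs, "/user/example");

        // Relative and non-normalized paths are rejected for now;
        // both of these fail with errno == ENOTSUP.
        hdfsSetWorkingDirectory(fs, "subdir");
        hdfsSetWorkingDirectory(fs, "/a//b");
    }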
 
@@ -1065,49 +1261,58 @@ void hdfsFreeHosts(char ***blockHosts)
     free(blockHosts);
 }
 
-/* not useful for libwebhdfs */
-int hdfsFileUsesDirectRead(hdfsFile file)
+tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
 {
-    /* return !!(file->flags & HDFS_FILE_SUPPORTS_DIRECT_READ); */
-    fprintf(stderr, "hdfsFileUsesDirectRead is no longer useful for libwebhdfs.\n");
+    errno = ENOTSUP;
     return -1;
 }
 
-/* not useful for libwebhdfs */
+int hdfsFileUsesDirectRead(hdfsFile file)
+{
+    return 0; // webhdfs never performs direct reads.
+}
+
 void hdfsFileDisableDirectRead(hdfsFile file)
 {
-    /* file->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_READ; */
-    fprintf(stderr, "hdfsFileDisableDirectRead is no longer useful for libwebhdfs.\n");
+    // webhdfs never performs direct reads
 }
 
-/* not useful for libwebhdfs */
 int hdfsHFlush(hdfsFS fs, hdfsFile file)
 {
+    if (file->type != OUTPUT) {
+        errno = EINVAL; 
+        return -1;
+    }
+    // TODO: block until our write buffer is flushed
     return 0;
 }
 
-/* not useful for libwebhdfs */
 int hdfsFlush(hdfsFS fs, hdfsFile file)
 {
+    if (file->type != OUTPUT) {
+        errno = EINVAL; 
+        return -1;
+    }
+    // TODO: block until our write buffer is flushed
     return 0;
 }
 
 char*** hdfsGetHosts(hdfsFS fs, const char* path,
                      tOffset start, tOffset length)
 {
-    fprintf(stderr, "hdfsGetHosts is not but will be supported by libwebhdfs yet.\n");
+    errno = ENOTSUP;
     return NULL;
 }
 
 tOffset hdfsGetCapacity(hdfsFS fs)
 {
-    fprintf(stderr, "hdfsGetCapacity is not but will be supported by libwebhdfs.\n");
+    errno = ENOTSUP;
     return -1;
 }
 
 tOffset hdfsGetUsed(hdfsFS fs)
 {
-    fprintf(stderr, "hdfsGetUsed is not but will be supported by libwebhdfs yet.\n");
+    errno = ENOTSUP;
     return -1;
 }
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c Thu Oct 11 06:14:26 2012
@@ -17,7 +17,7 @@
  */
 
 #include "expect.h"
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <errno.h>
 #include <semaphore.h>

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c Thu Oct 11 06:14:26 2012
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <inttypes.h>
 #include <jni.h>

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c Thu Oct 11 06:14:26 2012
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <stdio.h>
 #include <stdlib.h>

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c Thu Oct 11 06:14:26 2012
@@ -17,7 +17,7 @@
  */
 
 #include "expect.h"
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <errno.h>
 #include <semaphore.h>

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c Thu Oct 11 06:14:26 2012
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <limits.h>
 #include <stdio.h>

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c Thu Oct 11 06:14:26 2012
@@ -1,8 +1,9 @@
+#include "hdfs.h"
+
 #include <time.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/time.h>
-#include "webhdfs.h"
 
 #ifdef __MACH__
 #include <mach/clock.h>

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Thu Oct 11 06:14:26 2012
@@ -17,7 +17,7 @@
 
 bin=`which $0`
 bin=`dirname ${bin}`
-bin=`cd "$bin"; pwd`
+bin=`cd "$bin" > /dev/null; pwd`
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties?rev=1396918&r1=1396917&r2=1396918&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties Thu Oct 11 06:14:26 2012
@@ -19,7 +19,7 @@
 # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
 
 *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period
+# default sampling period, in seconds
 *.period=10
 
 # The namenode-metrics.out will contain metrics from all context