Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2009/07/31 03:19:16 UTC

svn commit: r799480 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoo...

Author: szetszwo
Date: Fri Jul 31 01:19:15 2009
New Revision: 799480

URL: http://svn.apache.org/viewvc?rev=799480&view=rev
Log:
HDFS-514. Change DFSClient.namenode from public to private.  Contributed by Bill Zeller

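In practice the change means that code which previously read the public field, e.g. "dfs.namenode.getBlockLocations(...)", must now go through the new accessor, "dfs.getNamenode().getBlockLocations(...)", as in the diff hunks below. A minimal caller sketch (class and path names here are illustrative only; assumes a reachable HDFS configured via fs.default.name and an existing file /tmp.txt):

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    public class GetNamenodeExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DFSClient client = new DFSClient(conf);
        try {
          // Before r799480: ClientProtocol nn = client.namenode;  (no longer compiles)
          ClientProtocol nn = client.getNamenode();

          // Same ClientProtocol calls as before, now via the accessor.
          List<LocatedBlock> blocks = nn
              .getBlockLocations("/tmp.txt", 0, Long.MAX_VALUE)
              .getLocatedBlocks();
          System.out.println("Located " + blocks.size() + " block(s) for /tmp.txt");
        } finally {
          client.close();
        }
      }
    }
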
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jul 31 01:19:15 2009
@@ -60,6 +60,9 @@
     HDFS-500. Deprecate NameNode methods deprecated in NameNodeProtocol.
     (Jakob Homan via shv)
 
+    HDFS-514. Change DFSClient.namenode from public to private.  (Bill Zeller
+    via szetszwo)
+
   BUG FIXES
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Fri Jul 31 01:19:15 2009
@@ -127,7 +127,7 @@
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final int MAX_BLOCK_ACQUIRE_FAILURES = 3;
   private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
-  public final ClientProtocol namenode;
+  final private ClientProtocol namenode;
   final private ClientProtocol rpcNamenode;
   final UnixUserGroupInformation ugi;
   volatile boolean clientRunning = true;
@@ -430,6 +430,14 @@
     return create(src, overwrite, replication, blockSize, null);
   }
 
+  /**
+   * Get the namenode associated with this DFSClient object
+   * @return the namenode associated with this DFSClient object
+   */
+  public ClientProtocol getNamenode() {
+    return namenode;
+  }
+  
   
   /**
    * Create a new dfs file with the specified block replication 

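Design note: as the two field declarations in the hunk above suggest, DFSClient keeps both the retry-wrapped "namenode" proxy and the raw "rpcNamenode". The new getNamenode() hands out the former, so external callers (the JSP helper and tests updated below) keep the same retry semantics the client uses internally; only the field's visibility changes, not the object handed out.
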
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Fri Jul 31 01:19:15 2009
@@ -65,7 +65,7 @@
       JspHelper.printGotoForm(out, namenodeInfoPort, target);
     } else {
       if (!targetStatus.isDir()) { // a file
-        List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(dir, 0, 1)
+        List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(dir, 0, 1)
             .getLocatedBlocks();
 
         LocatedBlock firstBlock = null;
@@ -205,7 +205,7 @@
 
     final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(),
         JspHelper.conf);
-    List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+    List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
         Long.MAX_VALUE).getLocatedBlocks();
     // Add the various links for looking at the file contents
     // URL for downloading the full file
@@ -320,7 +320,7 @@
     AccessToken accessToken = AccessToken.DUMMY_TOKEN;
     if (JspHelper.conf.getBoolean(
         AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false)) {
-      List<LocatedBlock> blks = dfs.namenode.getBlockLocations(filename, 0,
+      List<LocatedBlock> blks = dfs.getNamenode().getBlockLocations(filename, 0,
           Long.MAX_VALUE).getLocatedBlocks();
       if (blks == null || blks.size() == 0) {
         out.print("Can't locate file blocks");
@@ -390,7 +390,7 @@
     // determine data for the next link
     if (startOffset + chunkSizeToView >= blockSize) {
       // we have to go to the next block from this point onwards
-      List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+      List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
           Long.MAX_VALUE).getLocatedBlocks();
       for (int i = 0; i < blocks.size(); i++) {
         if (blocks.get(i).getBlock().getBlockId() == blockId) {
@@ -440,7 +440,7 @@
     int prevPort = req.getServerPort();
     int prevDatanodePort = datanodePort;
     if (startOffset == 0) {
-      List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+      List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
           Long.MAX_VALUE).getLocatedBlocks();
       for (int i = 0; i < blocks.size(); i++) {
         if (blocks.get(i).getBlock().getBlockId() == blockId) {
@@ -546,7 +546,7 @@
     // fetch the block from the datanode that has the last block for this file
     final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(),
         JspHelper.conf);
-    List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+    List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
         Long.MAX_VALUE).getLocatedBlocks();
     if (blocks == null || blocks.size() == 0) {
       out.print("No datanodes contain blocks of file " + filename);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java Fri Jul 31 01:19:15 2009
@@ -50,10 +50,10 @@
   
       //try reading the block by someone
       DFSClient dfsclient = new DFSClient(CONF);
-      LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
+      LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
       LocatedBlock b = blocks.get(0); 
       try {
-        dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
+        dfsclient.getNamenode().abandonBlock(b.getBlock(), src, "someone");
         //previous line should throw an exception.
         assertTrue(false);
       }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Fri Jul 31 01:19:15 2009
@@ -171,7 +171,7 @@
     dfsClient = new DFSClient(new InetSocketAddress("localhost", 
                                         cluster.getNameNodePort()), conf);
     do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       blockCount = blocks.get(0).getLocations().length;
       try {
@@ -190,7 +190,7 @@
 
     // We have 2 good replicas and block is not corrupt
     do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       blockCount = blocks.get(0).getLocations().length;
       try {
@@ -218,7 +218,7 @@
     // We now have the blocks to be marked as corrupt and we get back all
     // its replicas
     do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       blockCount = blocks.get(0).getLocations().length;
       try {
@@ -282,7 +282,7 @@
     
     dfsClient = new DFSClient(new InetSocketAddress("localhost", 
                                         cluster.getNameNodePort()), conf);
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     replicaCount = blocks.get(0).getLocations().length;
 
@@ -294,7 +294,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                    getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }
@@ -332,7 +332,7 @@
     }
     
     // Loop until the block recovers after replication
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     replicaCount = blocks.get(0).getLocations().length;
     while (replicaCount != numReplicas) {
@@ -341,7 +341,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }
@@ -358,7 +358,7 @@
       }
       corruptReplicaSize = cluster.getNamesystem().
                             numCorruptReplicas(blk);
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Fri Jul 31 01:19:15 2009
@@ -168,7 +168,7 @@
       assertTrue("There should be only one datanode but found " + dn.length,
                   dn.length == 1);
 
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       List<LocatedBlock> blocks = locations.getLocatedBlocks();
       FSDataset dataset = (FSDataset) dn[0].data;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Jul 31 01:19:15 2009
@@ -160,7 +160,7 @@
 
     //b. Log into one datanode that has one replica of this block.
     //   Find the block file on this datanode and truncate it to zero size.
-    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(p.toString(), 0L, len1);
+    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
     assertEquals(1, locatedblocks.locatedBlockCount());
     final LocatedBlock lb = locatedblocks.get(0);
     final Block blk = lb.getBlock();
@@ -224,7 +224,7 @@
 
     //check block sizes 
     final long len = fs.getFileStatus(pnew).getLen();
-    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(pnew.toString(), 0L, len);
+    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
     final int numblock = locatedblocks.locatedBlockCount();
     for(int i = 0; i < numblock; i++) {
       final LocatedBlock lb = locatedblocks.get(i);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Fri Jul 31 01:19:15 2009
@@ -372,7 +372,7 @@
 
       // verify that no blocks are associated with this file
       // bad block allocations were cleaned up earlier.
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up",
@@ -411,18 +411,18 @@
       System.out.println("testFileCreationError2: "
                          + "Created file filestatus.dat with one replicas.");
 
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("testFileCreationError2: "
           + "The file has " + locations.locatedBlockCount() + " blocks.");
 
       // add another block to the file
-      LocatedBlock location = client.namenode.addBlock(file1.toString(), 
+      LocatedBlock location = client.getNamenode().addBlock(file1.toString(), 
           client.clientName);
       System.out.println("testFileCreationError2: "
           + "Added block " + location.getBlock());
 
-      locations = client.namenode.getBlockLocations(file1.toString(), 
+      locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
       int count = locations.locatedBlockCount();
       System.out.println("testFileCreationError2: "
@@ -439,7 +439,7 @@
       }
 
       // verify that the last block was synchronized.
-      locations = client.namenode.getBlockLocations(file1.toString(), 
+      locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
       System.out.println("testFileCreationError2: "
           + "locations = " + locations.locatedBlockCount());
@@ -567,14 +567,14 @@
 
       // verify that new block is associated with this file
       DFSClient client = ((DistributedFileSystem)fs).dfs;
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file1,
                  locations.locatedBlockCount() == 3);
 
       // verify filestatus2.dat
-      locations = client.namenode.getBlockLocations(
+      locations = client.getNamenode().getBlockLocations(
                                   file2.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file2,
@@ -790,7 +790,7 @@
       // wait for the lease to expire
       try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}
 
-      LocatedBlocks locations = dfs.dfs.namenode.getBlockLocations(
+      LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(
           f, 0, Long.MAX_VALUE);
       assertEquals(1, locations.locatedBlockCount());
       LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java Fri Jul 31 01:19:15 2009
@@ -76,7 +76,7 @@
       boolean notWritten;
       do {
         DFSClient dfsclient = new DFSClient(CONF);
-        locatedBlocks = dfsclient.namenode.
+        locatedBlocks = dfsclient.getNamenode().
           getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
         assertEquals(2, locatedBlocks.size());
         notWritten = false;

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Fri Jul 31 01:19:15 2009
@@ -148,7 +148,7 @@
       writeFile(cluster.getFileSystem(), testPath, numDataNodes);
 
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, 20);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
 
       
       Block[][] blocksList = cluster.getAllBlockReports();
@@ -188,7 +188,7 @@
                                   cluster.getNameNodePort()),
                                   conf);
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
       
     } finally {
       if (cluster != null) {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Fri Jul 31 01:19:15 2009
@@ -75,7 +75,7 @@
 
       //get block info for the last block
       LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
-          dfs.dfs.namenode, filestr);
+          dfs.dfs.getNamenode(), filestr);
       DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
       assertEquals(REPLICATION_NUM, datanodeinfos.length);
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java Fri Jul 31 01:19:15 2009
@@ -171,7 +171,7 @@
     fs.setReplication(file1, (short)2);
   
     // Now get block details and check if the block is corrupt
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     while (blocks.get(0).isCorrupt() != true) {
       try {
@@ -179,7 +179,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ie) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     }
     replicaCount = blocks.get(0).getLocations().length;
@@ -321,10 +321,10 @@
       out.write(buffer);
       out.close();
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
 
       // get first block of the file.
-      String block = dfsClient.namenode.
+      String block = dfsClient.getNamenode().
                        getBlockLocations(testFile, 0, Long.MAX_VALUE).
                        get(0).getBlock().getBlockName();
       
@@ -386,7 +386,7 @@
                                   cluster.getNameNodePort()),
                                   conf);
       
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
       
     } finally {
       if (cluster != null) {
@@ -436,19 +436,19 @@
     // block replication triggers corrupt block detection
     DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
         cluster.getNameNodePort()), fs.getConf());
-    LocatedBlocks blocks = dfsClient.namenode.getBlockLocations(
+    LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
         fileName.toString(), 0, fileLen);
     if (lenDelta < 0) { // replica truncated
     	while (!blocks.get(0).isCorrupt() || 
     			REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
     		Thread.sleep(100);
-    		blocks = dfsClient.namenode.getBlockLocations(
+    		blocks = dfsClient.getNamenode().getBlockLocations(
     				fileName.toString(), 0, fileLen);
     	}
     } else { // no corruption detected; block replicated
     	while (REPLICATION_FACTOR+1 != blocks.get(0).getLocations().length) {
     		Thread.sleep(100);
-    		blocks = dfsClient.namenode.getBlockLocations(
+    		blocks = dfsClient.getNamenode().getBlockLocations(
     				fileName.toString(), 0, fileLen);
     	}
     }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Fri Jul 31 01:19:15 2009
@@ -110,7 +110,7 @@
       InetSocketAddress addr = new InetSocketAddress("localhost",
           cluster.getNameNodePort());
       DFSClient client = new DFSClient(addr, CONF);
-      List<LocatedBlock> locatedBlocks = client.namenode.
+      List<LocatedBlock> locatedBlocks = client.getNamenode().
         getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
       assertEquals(1, locatedBlocks.size());
       LocatedBlock block = locatedBlocks.get(0);
@@ -194,7 +194,7 @@
         Thread.sleep(100);
       } catch(InterruptedException e) {
       }
-      List<LocatedBlock> blocks = client.namenode.
+      List<LocatedBlock> blocks = client.getNamenode().
       getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
       assertEquals(1, blocks.size());
       DatanodeInfo[] nodes = blocks.get(0).getLocations();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Fri Jul 31 01:19:15 2009
@@ -83,7 +83,7 @@
       assertTrue(dfs.getClient().exists(filestr));
 
       //get block info
-      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().namenode, filestr);
+      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
       DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
       assertTrue(datanodeinfo.length > 0);
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=799480&r1=799479&r2=799480&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Fri Jul 31 01:19:15 2009
@@ -175,7 +175,7 @@
       String[] fileNames = util.getFileNames(topDir);
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                           cluster.getNameNodePort()), conf);
-      String block = dfsClient.namenode.
+      String block = dfsClient.getNamenode().
                       getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
                       get(0).getBlock().getBlockName();
       File baseDir = new File(System.getProperty("test.build.data",
@@ -315,7 +315,7 @@
 
     dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                cluster.getNameNodePort()), conf);
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
     replicaCount = blocks.get(0).getLocations().length;
     while (replicaCount != 3) {
@@ -323,7 +323,7 @@
         Thread.sleep(100);
       } catch (InterruptedException ignore) {
       }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;
     }