Posted to hdfs-commits@hadoop.apache.org by gk...@apache.org on 2012/08/03 21:00:59 UTC

svn commit: r1369164 [8/16] - in /hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/or...

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Fri Aug  3 19:00:15 2012
@@ -17,17 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
+import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
-import static org.apache.hadoop.metrics2.impl.MsInfo.*;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 
@@ -57,15 +60,31 @@ public class NameNodeMetrics {
 
   @Metric("Journal transactions") MutableRate transactions;
   @Metric("Journal syncs") MutableRate syncs;
+  MutableQuantiles[] syncsQuantiles;
   @Metric("Journal transactions batched in sync")
   MutableCounterLong transactionsBatchedInSync;
   @Metric("Block report") MutableRate blockReport;
+  MutableQuantiles[] blockReportQuantiles;
 
   @Metric("Duration in SafeMode at startup") MutableGaugeInt safeModeTime;
   @Metric("Time loading FS Image at startup") MutableGaugeInt fsImageLoadTime;
 
-  NameNodeMetrics(String processName, String sessionId) {
+  NameNodeMetrics(String processName, String sessionId, int[] intervals) {
     registry.tag(ProcessName, processName).tag(SessionId, sessionId);
+    
+    final int len = intervals.length;
+    syncsQuantiles = new MutableQuantiles[len];
+    blockReportQuantiles = new MutableQuantiles[len];
+    
+    for (int i = 0; i < len; i++) {
+      int interval = intervals[i];
+      syncsQuantiles[i] = registry.newQuantiles(
+          "syncs" + interval + "s",
+          "Journal syncs", "ops", "latency", interval);
+      blockReportQuantiles[i] = registry.newQuantiles(
+          "blockReport" + interval + "s", 
+          "Block report", "ops", "latency", interval);
+    }
   }
 
   public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
@@ -73,7 +92,11 @@ public class NameNodeMetrics {
     String processName = r.toString();
     MetricsSystem ms = DefaultMetricsSystem.instance();
     JvmMetrics.create(processName, sessionId, ms);
-    return ms.register(new NameNodeMetrics(processName, sessionId));
+    
+    // Percentile measurement is off by default, since no intervals are watched
+    int[] intervals = 
+        conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+    return ms.register(new NameNodeMetrics(processName, sessionId, intervals));
   }
 
   public void shutdown() {
@@ -146,6 +169,9 @@ public class NameNodeMetrics {
 
   public void addSync(long elapsed) {
     syncs.add(elapsed);
+    for (MutableQuantiles q : syncsQuantiles) {
+      q.add(elapsed);
+    }
   }
 
   public void setFsImageLoadTime(long elapsed) {
@@ -154,6 +180,9 @@ public class NameNodeMetrics {
 
   public void addBlockReport(long latency) {
     blockReport.add(latency);
+    for (MutableQuantiles q : blockReportQuantiles) {
+      q.add(latency);
+    }
   }
 
   public void setSafeModeTime(long elapsed) {

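The quantile metrics added above stay dormant until percentile intervals are configured. As a rough illustration, here is a minimal sketch of enabling them through the standard Configuration API, assuming only the DFS_METRICS_PERCENTILES_INTERVALS_KEY introduced by this change (the class name and interval values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class PercentileIntervalsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Watch 1-minute and 10-minute windows. With the key unset,
        // conf.getInts() returns an empty array, NameNodeMetrics registers
        // no MutableQuantiles, and percentile measurement stays off.
        conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "60,600");
        int[] intervals =
            conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
        // NameNodeMetrics.create(conf, role) would then build quantiles
        // named e.g. "syncs60s" and "blockReport600s"; every addSync()
        // and addBlockReport() call feeds each configured window.
        System.out.println("watching " + intervals.length + " interval(s)");
      }
    }
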
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Fri Aug  3 19:00:15 2012
@@ -77,6 +77,7 @@ public class NamespaceInfo extends Stora
     return softwareVersion;
   }
 
+  @Override
   public String toString(){
     return super.toString() + ";bpid=" + blockPoolID;
   }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java Fri Aug  3 19:00:15 2012
@@ -42,5 +42,6 @@ public interface NodeRegistration {
    */
   public int getVersion();
 
+  @Override
   public String toString();
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java Fri Aug  3 19:00:15 2012
@@ -82,6 +82,7 @@ public class ReceivedDeletedBlockInfo {
     return status;
   }
 
+  @Override
   public boolean equals(Object o) {
     if (!(o instanceof ReceivedDeletedBlockInfo)) {
       return false;
@@ -93,6 +94,7 @@ public class ReceivedDeletedBlockInfo {
             this.delHints != null && this.delHints.equals(other.delHints));
   }
 
+  @Override
   public int hashCode() {
     assert false : "hashCode not designed";
     return 0; 
@@ -106,6 +108,7 @@ public class ReceivedDeletedBlockInfo {
     return status == BlockStatus.DELETED_BLOCK;
   }
 
+  @Override
   public String toString() {
     return block.toString() + ", status: " + status +
       ", delHint: " + delHints;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Fri Aug  3 19:00:15 2012
@@ -115,6 +115,7 @@ public class DFSck extends Configured im
   /**
    * @param args
    */
+  @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
       printUsage();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Fri Aug  3 19:00:15 2012
@@ -259,7 +259,6 @@ public class DelegationTokenFetcher {
     try {
       URL url = new URL(buf.toString());
       connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
-      connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " + 
             connection.getResponseMessage());

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java Fri Aug  3 19:00:15 2012
@@ -57,6 +57,7 @@ class OfflineEditsBinaryLoader implement
   /**
    * Loads edits file, uses visitor to process all elements
    */
+  @Override
   public void loadEdits() throws IOException {
     visitor.start(inputStream.getVersion());
     while (true) {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java Fri Aug  3 19:00:15 2012
@@ -77,6 +77,7 @@ class OfflineEditsXmlLoader 
   /**
    * Loads edits file, uses visitor to process all elements
    */
+  @Override
   public void loadEdits() throws IOException {
     try {
       XMLReader xr = XMLReaderFactory.createXMLReader();
@@ -120,6 +121,7 @@ class OfflineEditsXmlLoader 
     }
   }
   
+  @Override
   public void startElement (String uri, String name,
       String qName, Attributes atts) {
     switch (state) {
@@ -168,6 +170,7 @@ class OfflineEditsXmlLoader 
     }
   }
   
+  @Override
   public void endElement (String uri, String name, String qName) {
     String str = cbuf.toString().trim();
     cbuf = new StringBuffer();
@@ -248,6 +251,7 @@ class OfflineEditsXmlLoader 
     }
   }
   
+  @Override
   public void characters (char ch[], int start, int length) {
     cbuf.append(ch, start, length);
   }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java Fri Aug  3 19:00:15 2012
@@ -59,6 +59,7 @@ class IndentedImageVisitor extends TextW
     write(element + " = " + value + "\n");
   }
 
+  @Override
   void visit(ImageElement element, long value) throws IOException {
     if ((element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME) || 
         (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE) || 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java Fri Aug  3 19:00:15 2012
@@ -54,6 +54,7 @@ public class CyclicIteration<K, V> imple
   }
 
   /** {@inheritDoc} */
+  @Override
   public Iterator<Map.Entry<K, V>> iterator() {
     return new CyclicIterator();
   }
@@ -89,11 +90,13 @@ public class CyclicIteration<K, V> imple
     }
 
     /** {@inheritDoc} */
+    @Override
     public boolean hasNext() {
       return hasnext;
     }
 
     /** {@inheritDoc} */
+    @Override
     public Map.Entry<K, V> next() {
       if (!hasnext) {
         throw new NoSuchElementException();
@@ -106,6 +109,7 @@ public class CyclicIteration<K, V> imple
     }
 
     /** Not supported */
+    @Override
     public void remove() {
       throw new UnsupportedOperationException("Not supported");
     }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.apache.hadoop.hdfs.server.common.Util.monotonicNow;
+import static org.apache.hadoop.util.Time.monotonicNow;
 
 /** 
  * a class to throttle the data transfers.

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java Fri Aug  3 19:00:15 2012
@@ -79,7 +79,9 @@ public class LightWeightGSet<K, E extend
    */
   public LightWeightGSet(final int recommended_length) {
     final int actual = actualArrayLength(recommended_length);
-    LOG.info("recommended=" + recommended_length + ", actual=" + actual);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("recommended=" + recommended_length + ", actual=" + actual);
+    }
 
     entries = new LinkedElement[actual];
     hash_mask = entries.length - 1;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java Fri Aug  3 19:00:15 2012
@@ -55,6 +55,7 @@ public class LightWeightHashSet<T> imple
       this.hashCode = hash;
     }
 
+    @Override
     public String toString() {
       return element.toString();
     }
@@ -142,6 +143,7 @@ public class LightWeightHashSet<T> imple
    *
   * @return true if the set is empty, false otherwise
    */
+  @Override
   public boolean isEmpty() {
     return size == 0;
   }
@@ -156,6 +158,7 @@ public class LightWeightHashSet<T> imple
   /**
    * Return the number of stored elements.
    */
+  @Override
   public int size() {
     return size;
   }
@@ -217,6 +220,7 @@ public class LightWeightHashSet<T> imple
    * @param toAdd - elements to add.
    * @return true if the set has changed, false otherwise
    */
+  @Override
   public boolean addAll(Collection<? extends T> toAdd) {
     boolean changed = false;
     for (T elem : toAdd) {
@@ -231,6 +235,7 @@ public class LightWeightHashSet<T> imple
    *
    * @return true if the element was not present in the table, false otherwise
    */
+  @Override
   public boolean add(final T element) {
     boolean added = addElem(element);
     expandIfNecessary();
@@ -270,6 +275,7 @@ public class LightWeightHashSet<T> imple
    *
    * @return If such element exists, return true. Otherwise, return false.
    */
+  @Override
   @SuppressWarnings("unchecked")
   public boolean remove(final Object key) {
     // validate key
@@ -489,6 +495,7 @@ public class LightWeightHashSet<T> imple
     }
   }
 
+  @Override
   public Iterator<T> iterator() {
     return new LinkedSetIterator();
   }
@@ -560,6 +567,7 @@ public class LightWeightHashSet<T> imple
   /**
    * Clear the set. Resize it to the original capacity.
    */
+  @Override
   @SuppressWarnings("unchecked")
   public void clear() {
     this.capacity = this.initialCapacity;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java Fri Aug  3 19:00:15 2012
@@ -47,6 +47,7 @@ public class LightWeightLinkedSet<T> ext
       this.after = null;
     }
 
+    @Override
     public String toString() {
       return super.toString();
     }
@@ -79,6 +80,7 @@ public class LightWeightLinkedSet<T> ext
    *
    * @return true if the element was not present in the table, false otherwise
    */
+  @Override
   protected boolean addElem(final T element) {
     // validate element
     if (element == null) {
@@ -118,6 +120,7 @@ public class LightWeightLinkedSet<T> ext
    *
   * @return the entry with the element if it exists; otherwise return null.
    */
+  @Override
   protected DoubleLinkedElement<T> removeElem(final T key) {
     DoubleLinkedElement<T> found = (DoubleLinkedElement<T>) (super
         .removeElem(key));
@@ -162,6 +165,7 @@ public class LightWeightLinkedSet<T> ext
    *
    * @return first element
    */
+  @Override
   public List<T> pollN(int n) {
     if (n >= size) {
       // if we need to remove all elements then do fast polling
@@ -182,6 +186,7 @@ public class LightWeightLinkedSet<T> ext
    * link list, don't worry about hashtable - faster version of the parent
    * method.
    */
+  @Override
   public List<T> pollAll() {
     List<T> retList = new ArrayList<T>(size);
     while (head != null) {
@@ -212,6 +217,7 @@ public class LightWeightLinkedSet<T> ext
     return a;
   }
 
+  @Override
   public Iterator<T> iterator() {
     return new LinkedSetIterator();
   }
@@ -251,6 +257,7 @@ public class LightWeightLinkedSet<T> ext
   /**
    * Clear the set. Resize it to the original capacity.
    */
+  @Override
   public void clear() {
     super.clear();
     this.head = null;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java Fri Aug  3 19:00:15 2012
@@ -146,6 +146,7 @@ public class XMLUtils {
     /** 
      * Convert a stanza to a human-readable string.
      */
+    @Override
     public String toString() {
       StringBuilder bld = new StringBuilder();
       bld.append("{");

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Fri Aug  3 19:00:15 2012
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.ByteRangeInputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -88,6 +89,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -147,6 +149,7 @@ public class WebHdfsFileSystem extends F
   private URI uri;
   private Token<?> delegationToken;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
+  private RetryPolicy retryPolicy = null;
   private Path workingDir;
 
   {
@@ -179,6 +182,7 @@ public class WebHdfsFileSystem extends F
       throw new IllegalArgumentException(e);
     }
     this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
+    this.retryPolicy = NameNodeProxies.getDefaultRetryPolicy(conf);
     this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -276,39 +280,64 @@ public class WebHdfsFileSystem extends F
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
-      final HttpURLConnection conn) throws IOException {
+      final HttpURLConnection conn, boolean unwrapException) throws IOException {
     final int code = conn.getResponseCode();
     if (code != op.getExpectedHttpResponseCode()) {
       final Map<?, ?> m;
       try {
         m = jsonParse(conn, true);
-      } catch(IOException e) {
+      } catch(Exception e) {
         throw new IOException("Unexpected HTTP response: code=" + code + " != "
             + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
             + ", message=" + conn.getResponseMessage(), e);
       }
 
-      if (m.get(RemoteException.class.getSimpleName()) == null) {
+      if (m == null) {
+        throw new IOException("Unexpected HTTP response: code=" + code + " != "
+            + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
+            + ", message=" + conn.getResponseMessage());
+      } else if (m.get(RemoteException.class.getSimpleName()) == null) {
         return m;
       }
 
       final RemoteException re = JsonUtil.toRemoteException(m);
-      throw re.unwrapRemoteException(AccessControlException.class,
-          InvalidToken.class,
-          AuthenticationException.class,
-          AuthorizationException.class,
-          FileAlreadyExistsException.class,
-          FileNotFoundException.class,
-          ParentNotDirectoryException.class,
-          UnresolvedPathException.class,
-          SafeModeException.class,
-          DSQuotaExceededException.class,
-          NSQuotaExceededException.class);
+      throw unwrapException? toIOException(re): re;
     }
     return null;
   }
 
   /**
+   * Convert an exception to an IOException.
+   * 
+   * For a non-IOException, wrap it with IOException.
+   * For a RemoteException, unwrap it.
+   * For an IOException which is not a RemoteException, return it. 
+   */
+  private static IOException toIOException(Exception e) {
+    if (!(e instanceof IOException)) {
+      return new IOException(e);
+    }
+
+    final IOException ioe = (IOException)e;
+    if (!(ioe instanceof RemoteException)) {
+      return ioe;
+    }
+
+    final RemoteException re = (RemoteException)ioe;
+    return re.unwrapRemoteException(AccessControlException.class,
+        InvalidToken.class,
+        AuthenticationException.class,
+        AuthorizationException.class,
+        FileAlreadyExistsException.class,
+        FileNotFoundException.class,
+        ParentNotDirectoryException.class,
+        UnresolvedPathException.class,
+        SafeModeException.class,
+        DSQuotaExceededException.class,
+        NSQuotaExceededException.class);
+  }
+
+  /**
    * Return a URL pointing to given path on the namenode.
    *
    * @param path to obtain the URL for
@@ -362,69 +391,15 @@ public class WebHdfsFileSystem extends F
   }
 
   private HttpURLConnection getHttpUrlConnection(URL url)
-      throws IOException {
+      throws IOException, AuthenticationException {
     final HttpURLConnection conn;
-    try {
-      if (ugi.hasKerberosCredentials()) { 
-        conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
-      } else {
-        conn = (HttpURLConnection)url.openConnection();
-      }
-    } catch (AuthenticationException e) {
-      throw new IOException("Authentication failed, url=" + url, e);
+    if (ugi.hasKerberosCredentials()) { 
+      conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
+    } else {
+      conn = (HttpURLConnection)url.openConnection();
     }
     return conn;
   }
-  
-  private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
-      final Param<?,?>... parameters) throws IOException {
-    final URL url = toUrl(op, fspath, parameters);
-
-    //connect and get response
-    HttpURLConnection conn = getHttpUrlConnection(url);
-    try {
-      conn.setRequestMethod(op.getType().toString());
-      if (op.getDoOutput()) {
-        conn = twoStepWrite(conn, op);
-        conn.setRequestProperty("Content-Type", "application/octet-stream");
-      }
-      conn.setDoOutput(op.getDoOutput());
-      conn.connect();
-      return conn;
-    } catch (IOException e) {
-      conn.disconnect();
-      throw e;
-    }
-  }
-  
-  /**
-   * Two-step Create/Append:
-   * Step 1) Submit a Http request with neither auto-redirect nor data. 
-   * Step 2) Submit another Http request with the URL from the Location header with data.
-   * 
-   * The reason of having two-step create/append is for preventing clients to
-   * send out the data before the redirect. This issue is addressed by the
-   * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
-   * Unfortunately, there are software library bugs (e.g. Jetty 6 http server
-   * and Java 6 http client), which do not correctly implement "Expect:
-   * 100-continue". The two-step create/append is a temporary workaround for
-   * the software library bugs.
-   */
-  static HttpURLConnection twoStepWrite(HttpURLConnection conn,
-      final HttpOpParam.Op op) throws IOException {
-    //Step 1) Submit a Http request with neither auto-redirect nor data. 
-    conn.setInstanceFollowRedirects(false);
-    conn.setDoOutput(false);
-    conn.connect();
-    validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn);
-    final String redirect = conn.getHeaderField("Location");
-    conn.disconnect();
-
-    //Step 2) Submit another Http request with the URL from the Location header with data.
-    conn = (HttpURLConnection)new URL(redirect).openConnection();
-    conn.setRequestMethod(op.getType().toString());
-    return conn;
-  }
 
   /**
    * Run a http operation.
@@ -438,12 +413,161 @@ public class WebHdfsFileSystem extends F
    */
   private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
-    final HttpURLConnection conn = httpConnect(op, fspath, parameters);
-    try {
-      final Map<?, ?> m = validateResponse(op, conn);
-      return m != null? m: jsonParse(conn, false);
-    } finally {
-      conn.disconnect();
+    return new Runner(op, fspath, parameters).run().json;
+  }
+
+  /**
+   * This class is for initializing an HTTP connection, connecting to the
+   * server, obtaining a response, and handling retries on failure.
+   */
+  class Runner {
+    private final HttpOpParam.Op op;
+    private final URL url;
+    private final boolean redirected;
+
+    private boolean checkRetry;
+    private HttpURLConnection conn = null;
+    private Map<?, ?> json = null;
+
+    Runner(final HttpOpParam.Op op, final URL url, final boolean redirected) {
+      this.op = op;
+      this.url = url;
+      this.redirected = redirected;
+    }
+
+    Runner(final HttpOpParam.Op op, final Path fspath,
+        final Param<?,?>... parameters) throws IOException {
+      this(op, toUrl(op, fspath, parameters), false);
+    }
+
+    Runner(final HttpOpParam.Op op, final HttpURLConnection conn) {
+      this(op, null, false);
+      this.conn = conn;
+    }
+
+    private void init() throws IOException {
+      checkRetry = !redirected;
+      try {
+        conn = getHttpUrlConnection(url);
+      } catch(AuthenticationException ae) {
+        checkRetry = false;
+        throw new IOException("Authentication failed, url=" + url, ae);
+      }
+    }
+    
+    private void connect() throws IOException {
+      connect(op.getDoOutput());
+    }
+
+    private void connect(boolean doOutput) throws IOException {
+      conn.setRequestMethod(op.getType().toString());
+      conn.setDoOutput(doOutput);
+      conn.setInstanceFollowRedirects(false);
+      conn.connect();
+    }
+
+    private void disconnect() {
+      if (conn != null) {
+        conn.disconnect();
+        conn = null;
+      }
+    }
+
+    Runner run() throws IOException {
+      for(int retry = 0; ; retry++) {
+        try {
+          init();
+          if (op.getDoOutput()) {
+            twoStepWrite();
+          } else {
+            getResponse(op != GetOpParam.Op.OPEN);
+          }
+          return this;
+        } catch(IOException ioe) {
+          shouldRetry(ioe, retry);
+        }
+      }
+    }
+
+    private void shouldRetry(final IOException ioe, final int retry
+        ) throws IOException {
+      if (checkRetry) {
+        try {
+          final RetryPolicy.RetryAction a = retryPolicy.shouldRetry(
+              ioe, retry, 0, true);
+          if (a.action == RetryPolicy.RetryAction.RetryDecision.RETRY) {
+            LOG.info("Retrying connect to namenode: " + nnAddr
+                + ". Already tried " + retry + " time(s); retry policy is "
+                + retryPolicy + ", delay " + a.delayMillis + "ms.");      
+            Thread.sleep(a.delayMillis);
+            return;
+          }
+        } catch(Exception e) {
+          LOG.warn("Original exception is ", ioe);
+          throw toIOException(e);
+        }
+      }
+      throw toIOException(ioe);
+    }
+
+    /**
+     * Two-step Create/Append:
+     * Step 1) Submit a Http request with neither auto-redirect nor data. 
+     * Step 2) Submit another Http request with the URL from the Location header with data.
+     * 
+     * The reason for the two-step create/append is to prevent clients from
+     * sending out data before the redirect. This issue is addressed by the
+     * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
+     * Unfortunately, there are software library bugs (e.g. Jetty 6 http server
+     * and Java 6 http client), which do not correctly implement "Expect:
+     * 100-continue". The two-step create/append is a temporary workaround for
+     * the software library bugs.
+     */
+    HttpURLConnection twoStepWrite() throws IOException {
+      //Step 1) Submit a Http request with neither auto-redirect nor data. 
+      connect(false);
+      validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
+      final String redirect = conn.getHeaderField("Location");
+      disconnect();
+      checkRetry = false;
+      
+      //Step 2) Submit another Http request with the URL from the Location header with data.
+      conn = (HttpURLConnection)new URL(redirect).openConnection();
+      conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
+      conn.setChunkedStreamingMode(32 << 10); //32kB-chunk
+      connect();
+      return conn;
+    }
+
+    FSDataOutputStream write(final int bufferSize) throws IOException {
+      return WebHdfsFileSystem.this.write(op, conn, bufferSize);
+    }
+
+    void getResponse(boolean getJsonAndDisconnect) throws IOException {
+      try {
+        connect();
+        final int code = conn.getResponseCode();
+        if (!redirected && op.getRedirect()
+            && code != op.getExpectedHttpResponseCode()) {
+          final String redirect = conn.getHeaderField("Location");
+          json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
+              conn, false);
+          disconnect();
+  
+          checkRetry = false;
+          conn = (HttpURLConnection)new URL(redirect).openConnection();
+          connect();
+        }
+
+        json = validateResponse(op, conn, false);
+        if (json == null && getJsonAndDisconnect) {
+          json = jsonParse(conn, false);
+        }
+      } finally {
+        if (getJsonAndDisconnect) {
+          disconnect();
+        }
+      }
     }
   }
 
@@ -577,7 +701,7 @@ public class WebHdfsFileSystem extends F
           super.close();
         } finally {
           try {
-            validateResponse(op, conn);
+            validateResponse(op, conn, true);
           } finally {
             conn.disconnect();
           }
@@ -593,13 +717,14 @@ public class WebHdfsFileSystem extends F
     statistics.incrementWriteOps(1);
 
     final HttpOpParam.Op op = PutOpParam.Op.CREATE;
-    final HttpURLConnection conn = httpConnect(op, f, 
+    return new Runner(op, f, 
         new PermissionParam(applyUMask(permission)),
         new OverwriteParam(overwrite),
         new BufferSizeParam(bufferSize),
         new ReplicationParam(replication),
-        new BlockSizeParam(blockSize));
-    return write(op, conn, bufferSize);
+        new BlockSizeParam(blockSize))
+      .run()
+      .write(bufferSize);
   }
 
   @Override
@@ -608,9 +733,9 @@ public class WebHdfsFileSystem extends F
     statistics.incrementWriteOps(1);
 
     final HttpOpParam.Op op = PostOpParam.Op.APPEND;
-    final HttpURLConnection conn = httpConnect(op, f, 
-        new BufferSizeParam(bufferSize));
-    return write(op, conn, bufferSize);
+    return new Runner(op, f, new BufferSizeParam(bufferSize))
+      .run()
+      .write(bufferSize);
   }
 
   @SuppressWarnings("deprecation")
@@ -637,26 +762,17 @@ public class WebHdfsFileSystem extends F
   }
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
-    /** The url with offset parameter */
-    private URL offsetUrl;
-  
     OffsetUrlOpener(final URL url) {
       super(url);
     }
 
-    /** Open connection with offset url. */
+    /** Setup offset url and connect. */
     @Override
-    protected HttpURLConnection openConnection() throws IOException {
-      return getHttpUrlConnection(offsetUrl);
-    }
-
-    /** Setup offset url before open connection. */
-    @Override
-    protected HttpURLConnection openConnection(final long offset) throws IOException {
-      offsetUrl = offset == 0L? url: new URL(url + "&" + new OffsetParam(offset));
-      final HttpURLConnection conn = openConnection();
-      conn.setRequestMethod("GET");
-      return conn;
+    protected HttpURLConnection connect(final long offset,
+        final boolean resolved) throws IOException {
+      final URL offsetUrl = offset == 0L? url
+          : new URL(url + "&" + new OffsetParam(offset));
+      return new Runner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
     }  
   }
 
@@ -697,12 +813,6 @@ public class WebHdfsFileSystem extends F
     OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
       super(o, r);
     }
-    
-    @Override
-    protected void checkResponseCode(final HttpURLConnection connection
-        ) throws IOException {
-      validateResponse(GetOpParam.Op.OPEN, connection);
-    }
 
     /** Remove offset parameter before returning the resolved url. */
     @Override
@@ -835,8 +945,7 @@ public class WebHdfsFileSystem extends F
     }
 
     private static WebHdfsFileSystem getWebHdfs(
-        final Token<?> token, final Configuration conf
-        ) throws IOException, InterruptedException, URISyntaxException {
+        final Token<?> token, final Configuration conf) throws IOException {
       
       final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
       final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
@@ -850,12 +959,7 @@ public class WebHdfsFileSystem extends F
       // update the kerberos credentials, if they are coming from a keytab
       ugi.reloginFromKeytab();
 
-      try {
-        WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
-        return webhdfs.renewDelegationToken(token);
-      } catch (URISyntaxException e) {
-        throw new IOException(e);
-      }
+      return getWebHdfs(token, conf).renewDelegationToken(token);
     }
   
     @Override
@@ -865,12 +969,7 @@ public class WebHdfsFileSystem extends F
       // update the kerberos credentials, if they are coming from a keytab
       ugi.checkTGTAndReloginFromKeytab();
 
-      try {
-        final WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
-        webhdfs.cancelDelegationToken(token);
-      } catch (URISyntaxException e) {
-        throw new IOException(e);
-      }
+      getWebHdfs(token, conf).cancelDelegationToken(token);
     }
   }
   

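The Runner.run()/shouldRetry() loop above delegates the retry decision to the RetryPolicy obtained from NameNodeProxies.getDefaultRetryPolicy(conf). Below is a self-contained sketch of the same decision pattern, using the stock RetryPolicies factory from org.apache.hadoop.io.retry; the fixed-sleep policy and the attempt() placeholder are illustrative stand-ins, not what WebHdfsFileSystem itself builds:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class RetryLoopSketch {
      static void attempt() throws IOException {
        // Placeholder: a real caller would open the HTTP connection and
        // validate the response here, as Runner.run() does.
        throw new IOException("connection refused (simulated)");
      }

      public static void main(String[] args) throws Exception {
        // Up to 5 attempts with a fixed one-second sleep between them.
        RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            5, 1, TimeUnit.SECONDS);
        for (int retry = 0; ; retry++) {
          try {
            attempt();
            return;
          } catch (IOException ioe) {
            RetryPolicy.RetryAction a = policy.shouldRetry(ioe, retry, 0, true);
            if (a.action != RetryPolicy.RetryAction.RetryDecision.RETRY) {
              throw ioe;  // the policy said FAIL (or failover): give up
            }
            Thread.sleep(a.delayMillis);
          }
        }
      }
    }
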
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java Fri Aug  3 19:00:15 2012
@@ -44,6 +44,11 @@ public class DeleteOpParam extends HttpO
     }
 
     @Override
+    public boolean getRedirect() {
+      return false;
+    }
+
+    @Override
     public int getExpectedHttpResponseCode() {
       return expectedHttpResponseCode;
     }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Fri Aug  3 19:00:15 2012
@@ -23,25 +23,27 @@ import java.net.HttpURLConnection;
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
   /** Get operations. */
   public static enum Op implements HttpOpParam.Op {
-    OPEN(HttpURLConnection.HTTP_OK),
+    OPEN(true, HttpURLConnection.HTTP_OK),
 
-    GETFILESTATUS(HttpURLConnection.HTTP_OK),
-    LISTSTATUS(HttpURLConnection.HTTP_OK),
-    GETCONTENTSUMMARY(HttpURLConnection.HTTP_OK),
-    GETFILECHECKSUM(HttpURLConnection.HTTP_OK),
-
-    GETHOMEDIRECTORY(HttpURLConnection.HTTP_OK),
-    GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
-    GETDELEGATIONTOKENS(HttpURLConnection.HTTP_OK),
+    GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
+    LISTSTATUS(false, HttpURLConnection.HTTP_OK),
+    GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
+    GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
+
+    GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
+    GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
+    GETDELEGATIONTOKENS(false, HttpURLConnection.HTTP_OK),
 
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
-    GET_BLOCK_LOCATIONS(HttpURLConnection.HTTP_OK),
+    GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
 
-    NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
+    final boolean redirect;
     final int expectedHttpResponseCode;
 
-    Op(final int expectedHttpResponseCode) {
+    Op(final boolean redirect, final int expectedHttpResponseCode) {
+      this.redirect = redirect;
       this.expectedHttpResponseCode = expectedHttpResponseCode;
     }
 
@@ -56,6 +58,11 @@ public class GetOpParam extends HttpOpPa
     }
 
     @Override
+    public boolean getRedirect() {
+      return redirect;
+    }
+
+    @Override
     public int getExpectedHttpResponseCode() {
       return expectedHttpResponseCode;
     }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java Fri Aug  3 19:00:15 2012
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
 import javax.ws.rs.core.Response;
 
 
@@ -42,6 +46,9 @@ public abstract class HttpOpParam<E exte
     /** @return true if the operation will do output. */
     public boolean getDoOutput();
 
+    /** @return true if the operation will be redirected. */
+    public boolean getRedirect();
+
     /** @return the expected http response code. */
     public int getExpectedHttpResponseCode();
 
@@ -51,15 +58,25 @@ public abstract class HttpOpParam<E exte
 
   /** Expects HTTP response 307 "Temporary Redirect". */
   public static class TemporaryRedirectOp implements Op {
-    static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(PutOpParam.Op.CREATE);
-    static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(PostOpParam.Op.APPEND);
+    static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(
+        PutOpParam.Op.CREATE);
+    static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(
+        PostOpParam.Op.APPEND);
+    static final TemporaryRedirectOp OPEN = new TemporaryRedirectOp(
+        GetOpParam.Op.OPEN);
+    static final TemporaryRedirectOp GETFILECHECKSUM = new TemporaryRedirectOp(
+        GetOpParam.Op.GETFILECHECKSUM);
     
+    static final List<TemporaryRedirectOp> values
+        = Collections.unmodifiableList(Arrays.asList(
+            new TemporaryRedirectOp[]{CREATE, APPEND, OPEN, GETFILECHECKSUM}));
+
     /** Get an object for the given op. */
     public static TemporaryRedirectOp valueOf(final Op op) {
-      if (op == CREATE.op) {
-        return CREATE;
-      } else if (op == APPEND.op) {
-        return APPEND;
+      for(TemporaryRedirectOp t : values) {
+        if (op == t.op) {
+          return t;
+        }
       }
       throw new IllegalArgumentException(op + " not found.");
     }
@@ -80,6 +97,11 @@ public abstract class HttpOpParam<E exte
       return op.getDoOutput();
     }
 
+    @Override
+    public boolean getRedirect() {
+      return false;
+    }
+
     /** Override the original expected response with "Temporary Redirect". */
     @Override
     public int getExpectedHttpResponseCode() {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java Fri Aug  3 19:00:15 2012
@@ -44,11 +44,17 @@ public class PostOpParam extends HttpOpP
     }
 
     @Override
+    public boolean getRedirect() {
+      return true;
+    }
+
+    @Override
     public int getExpectedHttpResponseCode() {
       return expectedHttpResponseCode;
     }
 
     /** @return a URI query string. */
+    @Override
     public String toQueryString() {
       return NAME + "=" + this;
     }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Fri Aug  3 19:00:15 2012
@@ -39,11 +39,11 @@ public class PutOpParam extends HttpOpPa
     
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
-    final boolean doOutput;
+    final boolean doOutputAndRedirect;
     final int expectedHttpResponseCode;
 
-    Op(final boolean doOutput, final int expectedHttpResponseCode) {
-      this.doOutput = doOutput;
+    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
+      this.doOutputAndRedirect = doOutputAndRedirect;
       this.expectedHttpResponseCode = expectedHttpResponseCode;
     }
 
@@ -54,7 +54,12 @@ public class PutOpParam extends HttpOpPa
 
     @Override
     public boolean getDoOutput() {
-      return doOutput;
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return doOutputAndRedirect;
     }
 
     @Override

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1358480-1369130

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Fri Aug  3 19:00:15 2012
@@ -715,6 +715,80 @@
 </property>
 
 <property>
+  <name>dfs.datanode.readahead.bytes</name>
+  <value>4193404</value>
+  <description>
+        While reading block files, if the Hadoop native libraries are available,
+        the datanode can use the posix_fadvise system call to explicitly
+        page data into the operating system buffer cache ahead of the current
+        reader's position. This can improve performance especially when
+        disks are highly contended.
+
+        This configuration specifies the number of bytes ahead of the current
+        read position which the datanode will attempt to read ahead. This
+        feature may be disabled by configuring this property to 0.
+
+        If the native libraries are not available, this configuration has no
+        effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.drop.cache.behind.reads</name>
+  <value>false</value>
+  <description>
+        In some workloads, the data read from HDFS is known to be large
+        enough that it is unlikely to be useful to cache it in the
+        operating system buffer cache. In this case, the DataNode may be
+        configured to automatically purge all data from the buffer cache
+        after it is delivered to the client. This behavior is automatically
+        disabled for workloads which read only short sections of a block
+        (e.g. HBase random-IO workloads).
+
+        This may improve performance for some workloads by freeing buffer
+        cache space usage for more cacheable data.
+
+        If the Hadoop native libraries are not available, this configuration
+        has no effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.drop.cache.behind.writes</name>
+  <value>false</value>
+  <description>
+        In some workloads, the data written to HDFS is known to be large
+        enough that it is unlikely to be useful to cache it in the
+        operating system buffer cache. In this case, the DataNode may be
+        configured to automatically purge all data from the buffer cache
+        after it is written to disk.
+
+        This may improve performance for some workloads by freeing buffer
+        cache space usage for more cacheable data.
+
+        If the Hadoop native libraries are not available, this configuration
+        has no effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.sync.behind.writes</name>
+  <value>false</value>
+  <description>
+        If this configuration is enabled, the datanode will instruct the
+        operating system to enqueue all written data to the disk immediately
+        after it is written. This differs from the usual OS policy which
+        may wait for up to 30 seconds before triggering writeback.
+
+        This may improve performance for some workloads by smoothing the
+        IO profile for data written to disk.
+
+        If the Hadoop native libraries are not available, this configuration
+        has no effect.
+  </description>
+</property>
+
+<property>
   <name>dfs.client.failover.max.attempts</name>
   <value>15</value>
   <description>
@@ -901,4 +975,43 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.webhdfs.enabled</name>
+  <value>false</value>
+  <description>
+    Enable WebHDFS (REST API) in Namenodes and Datanodes.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.fuse.connection.timeout</name>
+  <value>300</value>
+  <description>
+    The minimum number of seconds that we'll cache libhdfs connection objects
+    in fuse_dfs. Lower values will result in lower memory consumption; higher
+    values may speed up access by avoiding the overhead of creating new
+    connection objects.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.fuse.timer.period</name>
+  <value>5</value>
+  <description>
+    The number of seconds between cache expiry checks in fuse_dfs. Lower values
+    will result in fuse_dfs noticing changes to Kerberos ticket caches more
+    quickly.
+  </description>
+</property>
+
+<property>
+  <name>dfs.metrics.percentiles.intervals</name>
+  <value></value>
+  <description>
+    Comma-delimited set of integers denoting the desired rollover intervals 
+    (in seconds) for percentile latency metrics on the Namenode and Datanode.
+    By default, percentile latency metrics are disabled.
+  </description>
+</property>
+
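For example, a plausible setting requesting one-minute and ten-minute rollover windows (the specific intervals are illustrative) would be:

  <property>
    <name>dfs.metrics.percentiles.intervals</name>
    <value>60,600</value>
  </property>

Each listed interval yields an independently rolling window of percentile latency estimates; leaving the value empty keeps the feature disabled.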
 </configuration>

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1358480-1369130

Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1358480-1369130

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.cli;
 
-import org.apache.hadoop.cli.util.*;
+import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
+import org.apache.hadoop.cli.util.CLICommandTypes;
+import org.apache.hadoop.cli.util.CLITestCmd;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.cli.util.FSCmdExecutor;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 
 public class CLITestCmdDFS extends CLITestCmd {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java Fri Aug  3 19:00:15 2012
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.cli;
 
+import static org.junit.Assert.assertTrue;
+
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.fs.FileSystem;
@@ -27,7 +29,6 @@ import org.apache.hadoop.hdfs.HDFSPolicy
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.junit.After;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java Fri Aug  3 19:00:15 2012
@@ -55,6 +55,7 @@ public class TestFcHdfsCreateMkdir exten
     cluster.shutdown();   
   }
   
+  @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java Fri Aug  3 19:00:15 2012
@@ -55,6 +55,7 @@ public class TestFcHdfsPermission extend
     cluster.shutdown();   
   }
   
+  @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();
@@ -72,6 +73,7 @@ public class TestFcHdfsPermission extend
    */
   static final FsPermission FILE_MASK_IGNORE_X_BIT = 
     new FsPermission((short) ~0666);
+  @Override
   FsPermission getFileMask() {
     return FILE_MASK_IGNORE_X_BIT;
   }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java Fri Aug  3 19:00:15 2012
@@ -53,18 +53,22 @@ public class TestFcHdfsSymlink extends F
   private static WebHdfsFileSystem webhdfs;
 
   
+  @Override
   protected String getScheme() {
     return "hdfs";
   }
 
+  @Override
   protected String testBaseDir1() throws IOException {
     return "/test1";
   }
   
+  @Override
   protected String testBaseDir2() throws IOException {
     return "/test2";
   }
 
+  @Override
   protected URI testURI() {
     return cluster.getURI(0);
   }
@@ -93,7 +97,7 @@ public class TestFcHdfsSymlink extends F
   }
      
   @Test
-  /** Link from Hdfs to LocalFs */
+  /** Access a file using a link that spans Hdfs to LocalFs */
   public void testLinkAcrossFileSystems() throws IOException {
     Path localDir  = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test");
     Path localFile = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test/file");
@@ -108,7 +112,42 @@ public class TestFcHdfsSymlink extends F
     readFile(link);
     assertEquals(fileSize, fc.getFileStatus(link).getLen());
   }
-  
+
+  @Test
+  /** Test renaming a file across two file systems using a link */
+  public void testRenameAcrossFileSystemsViaLink() throws IOException {
+    Path localDir    = new Path("file://"+getAbsoluteTestRootDir(fc)+"/test");
+    Path hdfsFile    = new Path(testBaseDir1(), "file");
+    Path link        = new Path(testBaseDir1(), "link");
+    Path hdfsFileNew = new Path(testBaseDir1(), "fileNew");
+    Path hdfsFileNewViaLink = new Path(link, "fileNew");
+    FileContext localFc = FileContext.getLocalFSFileContext();
+    localFc.delete(localDir, true);
+    localFc.mkdir(localDir, FileContext.DEFAULT_PERM, true);
+    localFc.setWorkingDirectory(localDir);
+    createAndWriteFile(fc, hdfsFile);
+    fc.createSymlink(localDir, link, false);
+    // Rename hdfs://test1/file to hdfs://test1/link/fileNew
+    // which renames to file://TEST_ROOT/test/fileNew which
+    // spans AbstractFileSystems and therefore fails.
+    try {
+      fc.rename(hdfsFile, hdfsFileNewViaLink);
+      fail("Renamed across file systems");
+    } catch (InvalidPathException ipe) {
+      // Expected
+    }
+    // Now rename hdfs://test1/link/fileNew to hdfs://test1/fileNew
+    // which renames file://TEST_ROOT/test/fileNew to hdfs://test1/fileNew
+    // which spans AbstractFileSystems and therefore fails.
+    createAndWriteFile(fc, hdfsFileNewViaLink);
+    try {
+      fc.rename(hdfsFileNewViaLink, hdfsFileNew);
+      fail("Renamed across file systems");
+    } catch (InvalidPathException ipe) {
+      // Expected
+    }
+  }
+
   @Test
   /** Test access a symlink using AbstractFileSystem */
   public void testAccessLinkFromAbstractFileSystem() throws IOException {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Fri Aug  3 19:00:15 2012
@@ -17,37 +17,43 @@
  */
 package org.apache.hadoop.fs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
-import junit.framework.TestCase;
+public class TestGlobPaths {
 
-public class TestGlobPaths extends TestCase {
-  
   static class RegexPathFilter implements PathFilter {
-    
+
     private final String regex;
     public RegexPathFilter(String regex) {
       this.regex = regex;
     }
 
+    @Override
     public boolean accept(Path path) {
       return path.toString().matches(regex);
     }
 
   }
-  
+
   static private MiniDFSCluster dfsCluster;
   static private FileSystem fs;
   static final private int NUM_OF_PATHS = 4;
   static final String USER_DIR = "/user/"+System.getProperty("user.name");
   private Path[] path = new Path[NUM_OF_PATHS];
-  
-  protected void setUp() throws Exception {
+
+  @Before
+  public void setUp() throws Exception {
     try {
       Configuration conf = new HdfsConfiguration();
       dfsCluster = new MiniDFSCluster.Builder(conf).build();
@@ -57,12 +63,14 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     if(dfsCluster!=null) {
       dfsCluster.shutdown();
     }
   }
   
+  @Test
   public void testPathFilter() throws IOException {
     try {
       String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" };
@@ -75,6 +83,7 @@ public class TestGlobPaths extends TestC
     }
   }
   
+  @Test
   public void testPathFilterWithFixedLastComponent() throws IOException {
     try {
       String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b",
@@ -88,6 +97,7 @@ public class TestGlobPaths extends TestC
     }
   }
   
+  @Test
   public void testGlob() throws Exception {
     //pTestEscape(); // need to wait until HADOOP-1995 is fixed
     pTestJavaRegexSpecialChars();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Fri Aug  3 19:00:15 2012
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.fs;
 
+import static org.apache.hadoop.fs.FileContextTestHelper.exists;
+import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath;
+
 import java.io.IOException;
 import java.net.URISyntaxException;
 
@@ -27,8 +30,8 @@ import org.apache.hadoop.fs.Options.Rena
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -37,8 +40,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import static org.apache.hadoop.fs.FileContextTestHelper.*;
-
 public class TestHDFSFileContextMainOperations extends
     FileContextMainOperationsBaseTest {
   private static MiniDFSCluster cluster;
@@ -75,6 +76,7 @@ public class TestHDFSFileContextMainOper
     cluster.shutdown();   
   }
   
+  @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java Fri Aug  3 19:00:15 2012
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java Fri Aug  3 19:00:15 2012
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.fs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -25,19 +28,15 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
-import org.apache.hadoop.fs.Path;
+import org.junit.Test;
 
 /**
  * Test of the URL stream handler factory.
  */
-public class TestUrlStreamHandler extends TestCase {
+public class TestUrlStreamHandler {
 
   /**
    * Test opening and reading from an InputStream through a hdfs:// URL.
@@ -47,6 +46,7 @@ public class TestUrlStreamHandler extend
    * 
    * @throws IOException
    */
+  @Test
   public void testDfsUrls() throws IOException {
 
     Configuration conf = new HdfsConfiguration();
@@ -105,6 +105,7 @@ public class TestUrlStreamHandler extend
    * @throws IOException
    * @throws URISyntaxException
    */
+  @Test
   public void testFileUrls() throws IOException, URISyntaxException {
     // URLStreamHandler is already set in JVM by testDfsUrls() 
     Configuration conf = new HdfsConfiguration();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java Fri Aug  3 19:00:15 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.loadGenerator;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
@@ -27,9 +29,7 @@ import org.apache.hadoop.conf.Configured
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-
-import static org.junit.Assert.*;
-
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
@@ -171,7 +171,7 @@ public class TestLoadGenerator extends C
       args = new String[] {"-readProbability", "0.3", "-writeProbability", "0.3",
           "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
           "-numOfThreads", "1", "-startTime", 
-          Long.toString(System.currentTimeMillis()), "-elapsedTime", "10"};
+          Long.toString(Time.now()), "-elapsedTime", "10"};
       
       assertEquals(0, lg.run(args));
 
@@ -227,7 +227,7 @@ public class TestLoadGenerator extends C
       String[] scriptArgs = new String[] {
           "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
           "-numOfThreads", "10", "-startTime", 
-          Long.toString(System.currentTimeMillis()), "-scriptFile", script};
+          Long.toString(Time.now()), "-scriptFile", script};
       
       assertEquals(0, lg.run(scriptArgs));
       

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java Fri Aug  3 19:00:15 2012
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.fs.permission;
 
-import java.io.IOException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import junit.framework.TestCase;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -32,8 +35,9 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
 
-public class TestStickyBit extends TestCase {
+public class TestStickyBit {
 
   static UserGroupInformation user1 = 
     UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
@@ -158,6 +162,7 @@ public class TestStickyBit extends TestC
     assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
   }
 
+  @Test
   public void testGeneralSBBehavior() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
     try {
@@ -195,6 +200,7 @@ public class TestStickyBit extends TestC
    * Test that one user can't rename/move another user's file when the sticky
    * bit is set.
    */
+  @Test
   public void testMovingFiles() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
 
@@ -243,6 +249,7 @@ public class TestStickyBit extends TestC
    * the sticky bit back on re-start, and that no extra sticky bits appear after
    * re-start.
    */
+  @Test
   public void testStickyBitPersistence() throws IOException {
     MiniDFSCluster cluster = null;
     try {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java Fri Aug  3 19:00:15 2012
@@ -62,6 +62,7 @@ public class TestViewFileSystemAtHdfsRoo
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     fsTarget = fHdfs;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Fri Aug  3 19:00:15 2012
@@ -80,6 +80,7 @@ public class TestViewFileSystemHdfs exte
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs
@@ -89,6 +90,7 @@ public class TestViewFileSystemHdfs exte
     super.setUp();
   }
 
+  @Override
   @After
   public void tearDown() throws Exception {
     super.tearDown();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java Fri Aug  3 19:00:15 2012
@@ -61,6 +61,7 @@ public class TestViewFsAtHdfsRoot extend
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java Fri Aug  3 19:00:15 2012
@@ -18,6 +18,19 @@
 package org.apache.hadoop.fs.viewfs;
 
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -25,38 +38,17 @@ import java.net.URISyntaxException;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /**
  * Tests for viewfs implementation of default fs level values.

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java Fri Aug  3 19:00:15 2012
@@ -23,6 +23,9 @@ package org.apache.hadoop.fs.viewfs;
  * Since viewfs has overlaid ViewFsFileStatus, we ran into
  * serialization problems. This test verifies the fix.
  */
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -40,11 +43,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.UserGroupInformation;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 public class TestViewFsFileStatusHdfs {
   

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java Fri Aug  3 19:00:15 2012
@@ -60,6 +60,7 @@ public class TestViewFsHdfs extends View
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs