Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/05/05 05:22:36 UTC

svn commit: r1592437 - in /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/

Author: wang
Date: Mon May  5 03:22:36 2014
New Revision: 1592437

URL: http://svn.apache.org/r1592437
Log:
HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent. Contributed by Uma Maheswara Rao G.
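
Background on the change: setXAttr is not idempotent because its outcome depends on the namespace state it finds. With XAttrSetFlag.CREATE, a re-executed call fails once the xattr already exists. If the NameNode's reply is lost (for example across an HA failover) and the RPC layer re-sends the call, the operation must not run twice; annotating it @AtMostOnce makes the NameNode answer such retries from its RetryCache instead. A minimal sketch of the failure mode (a code fragment, reusing the DFSClient#setXAttr call exactly as the new test below issues it):

    // A lost reply followed by a client retry; path, name, and value mirror SetXAttrOp below.
    EnumSet<XAttrSetFlag> createOnly = EnumSet.of(XAttrSetFlag.CREATE);
    client.setXAttr("/setxattr", "user.key", "value".getBytes(), createOnly);
    // Suppose the call succeeds on the NameNode but the response never reaches the client,
    // and the retry policy re-sends it:
    client.setXAttr("/setxattr", "user.key", "value".getBytes(), createOnly);
    // If the retried call were simply re-executed (what @Idempotent promises is safe), it
    // would fail, because "user.key" now exists and CREATE forbids setting it again.
    // With @AtMostOnce the RetryCache replays the recorded success to the client instead.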

Modified:
    hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt
    hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt?rev=1592437&r1=1592436&r2=1592437&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-2006.txt Mon May  5 03:22:36 2014
@@ -8,18 +8,21 @@ HDFS-2006 (Unreleased)
 
   IMPROVEMENTS
 
-   HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
+    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
 
-   HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
+    HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
 
-   HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
+    HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
 
-   HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
+    HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
 
-   HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
+    HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
 
-   HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
+    HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
 
   OPTIMIZATIONS
 
   BUG FIXES
+
+    HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
+    (umamahesh via wang)

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1592437&r1=1592436&r2=1592437&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Mon May  5 03:22:36 2014
@@ -1262,7 +1262,7 @@ public interface ClientProtocol {
    * @param flag set flag
    * @throws IOException
    */
-  @Idempotent
+  @AtMostOnce
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag) 
       throws IOException;
   
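For context, both annotations live in org.apache.hadoop.io.retry: @Idempotent tells the retry policies that re-executing the call leaves the namespace unchanged, so a blind retry is safe, while @AtMostOnce says the call must run at most once, so retried invocations are deduplicated on the NameNode by client id and call id via the RetryCache. An illustrative contrast (paraphrased and abridged, not part of this change) with a read-only ClientProtocol method that keeps the weaker annotation:

    // Read-only lookup: running it twice returns the same answer and changes nothing,
    // so re-execution on retry is harmless.
    @Idempotent
    public HdfsFileStatus getFileInfo(String src) throws IOException;

    // Mutation whose outcome depends on what already exists (see the sketch under the
    // commit log above): a retried call must be answered from the RetryCache, not re-run.
    @AtMostOnce
    public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
        throws IOException;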

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1592437&r1=1592436&r2=1592437&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon May  5 03:22:36 2014
@@ -7708,17 +7708,45 @@ public class FSNamesystem implements Nam
     }
   }
   
-  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag) 
-      throws IOException {
-    nnConf.checkXAttrsConfigFlag();
-    HdfsFileStatus resultingStat = null;
-    FSPermissionChecker pc = getPermissionChecker();
+  /**
+   * Set xattr for a file or directory.
+   * 
+   * @param src
+   *          - path on which it sets the xattr
+   * @param xAttr
+   *          - xAttr details to set
+   * @param flag
+   *          - xAttrs flags
+   * @throws AccessControlException
+   * @throws SafeModeException
+   * @throws UnresolvedLinkException
+   * @throws IOException
+   */
+  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+      throws AccessControlException, SafeModeException,
+      UnresolvedLinkException, IOException {
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
     try {
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+      setXAttrInt(src, xAttr, flag);
+      success = true;
     } catch (AccessControlException e) {
       logAuditEvent(false, "setXAttr", src);
       throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
     }
+  }
+  
+  private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag) 
+      throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
@@ -7729,12 +7757,8 @@ public class FSNamesystem implements Nam
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.WRITE);
       }
-      
       dir.setXAttr(src, xAttr, flag);
       resultingStat = getAuditFileInfo(src, false);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "setXAttr", src);
-      throw e;
     } finally {
       writeUnlock();
     }
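
The new wrapper follows the pattern FSNamesystem uses for its other @AtMostOnce operations: consult the RetryCache first, short-circuit when an earlier attempt with the same client id and call id already succeeded, and otherwise record the outcome in a finally block so later retries can be answered without re-executing the mutation. Condensed to its shape (a restatement of the code added above, with the audit-log handling elided):

    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
      return;                                    // retried RPC: reuse the recorded outcome
    }
    boolean success = false;
    try {
      setXAttrInt(src, xAttr, flag);             // the operation-specific work
      success = true;
    } finally {
      RetryCache.setState(cacheEntry, success);  // remember the result for future retries
    }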

Modified: hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1592437&r1=1592436&r2=1592437&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Mon May  5 03:22:36 2014
@@ -33,6 +33,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
@@ -45,6 +46,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -126,6 +128,7 @@ public class TestRetryCacheWithHA {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(DataNodes).build();
@@ -1004,6 +1007,48 @@ public class TestRetryCacheWithHA {
       return null;
     }
   }
+  
+  /** setXAttr */
+  class SetXAttrOp extends AtMostOnceOp {
+    private final String src;
+
+    SetXAttrOp(DFSClient client, String src) {
+      super("setXAttr", client);
+      this.src = src;
+    }
+
+    @Override
+    void prepare() throws Exception {
+      Path p = new Path(src);
+      if (!dfs.exists(p)) {
+        DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+      }
+    }
+
+    @Override
+    void invoke() throws Exception {
+      client.setXAttr(src, "user.key", "value".getBytes(),
+          EnumSet.of(XAttrSetFlag.CREATE));
+    }
+
+    @Override
+    boolean checkNamenodeBeforeReturn() throws Exception {
+      for (int i = 0; i < CHECKTIMES; i++) {
+        Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
+        Set<String> keySet = iter.keySet();
+        if (keySet.contains("user.key")) {
+          return true;
+        }
+        Thread.sleep(1000);
+      }
+      return false;
+    }
+
+    @Override
+    Object getResult() {
+      return null;
+    }
+  }
 
   @Test (timeout=60000)
   public void testCreateSnapshot() throws Exception {
@@ -1133,6 +1178,13 @@ public class TestRetryCacheWithHA {
     AtMostOnceOp op = new RemoveCachePoolOp(client, "pool");
     testClientRetryWithFailover(op);
   }
+  
+  @Test (timeout=60000)
+  public void testSetXAttr() throws Exception {
+    DFSClient client = genClientWithDummyHandler();
+    AtMostOnceOp op = new SetXAttrOp(client, "/setxattr");
+    testClientRetryWithFailover(op);
+  }
 
   /**
    * When NN failover happens, if the client did not receive the response and