Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2014/07/10 08:16:35 UTC

svn commit: r1609401 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/util/ src/test/java/org/apache/hadoop/hdfs/server/namenode/

Author: szetszwo
Date: Thu Jul 10 06:16:34 2014
New Revision: 1609401

URL: http://svn.apache.org/r1609401
Log:
HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and INodeFile.HeaderFormat.

Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1609401&r1=1609400&r2=1609401&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jul 10 06:16:34 2014
@@ -278,6 +278,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6645. Add test for successive Snapshots between XAttr modifications.
     (Stephen Chu via jing9)
 
+    HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and
+    INodeFile.HeaderFormat. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java?rev=1609401&r1=1609400&r2=1609401&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java Thu Jul 10 06:16:34 2014
@@ -97,14 +97,12 @@ public interface INodeAttributes {
 
     @Override
     public final String getUserName() {
-      final int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-      return SerialNumberManager.INSTANCE.getUser(n);
+      return PermissionStatusFormat.getUser(permission);
     }
 
     @Override
     public final String getGroupName() {
-      final int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-      return SerialNumberManager.INSTANCE.getGroup(n);
+      return PermissionStatusFormat.getGroup(permission);
     }
 
     @Override
@@ -114,7 +112,7 @@ public interface INodeAttributes {
 
     @Override
     public final short getFsPermissionShort() {
-      return (short)PermissionStatusFormat.MODE.retrieve(permission);
+      return PermissionStatusFormat.getMode(permission);
     }
     
     @Override

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1609401&r1=1609400&r2=1609401&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Jul 10 06:16:34 2014
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -71,37 +72,29 @@ public class INodeFile extends INodeWith
   }
 
   /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
-  static class HeaderFormat {
-    /** Number of bits for Block size */
-    static final int BLOCKBITS = 48;
-    /** Header mask 64-bit representation */
-    static final long HEADERMASK = 0xffffL << BLOCKBITS;
-    static final long MAX_BLOCK_SIZE = ~HEADERMASK; 
-    
-    static short getReplication(long header) {
-      return (short) ((header & HEADERMASK) >> BLOCKBITS);
+  static enum HeaderFormat {
+    PREFERRED_BLOCK_SIZE(null, 48, 1),
+    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 16, 1);
+
+    private final LongBitFormat BITS;
+
+    private HeaderFormat(LongBitFormat previous, int length, long min) {
+      BITS = new LongBitFormat(name(), previous, length, min);
     }
 
-    static long combineReplication(long header, short replication) {
-      if (replication <= 0) {
-         throw new IllegalArgumentException(
-             "Unexpected value for the replication: " + replication);
-      }
-      return ((long)replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
+    static short getReplication(long header) {
+      return (short)REPLICATION.BITS.retrieve(header);
     }
-    
+
     static long getPreferredBlockSize(long header) {
-      return header & MAX_BLOCK_SIZE;
+      return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
     }
 
-    static long combinePreferredBlockSize(long header, long blockSize) {
-      if (blockSize < 0) {
-         throw new IllegalArgumentException("Block size < 0: " + blockSize);
-      } else if (blockSize > MAX_BLOCK_SIZE) {
-        throw new IllegalArgumentException("Block size = " + blockSize
-            + " > MAX_BLOCK_SIZE = " + MAX_BLOCK_SIZE);
-     }
-      return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
+    static long toLong(long preferredBlockSize, short replication) {
+      long h = 0;
+      h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
+      h = REPLICATION.BITS.combine(replication, h);
+      return h;
     }
   }
 
@@ -113,8 +106,7 @@ public class INodeFile extends INodeWith
       long atime, BlockInfo[] blklist, short replication,
       long preferredBlockSize) {
     super(id, name, permissions, mtime, atime);
-    header = HeaderFormat.combineReplication(header, replication);
-    header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
+    header = HeaderFormat.toLong(preferredBlockSize, replication);
     this.blocks = blklist;
   }
   
@@ -347,7 +339,7 @@ public class INodeFile extends INodeWith
 
   /** Set the replication factor of this file. */
   public final void setFileReplication(short replication) {
-    header = HeaderFormat.combineReplication(header, replication);
+    header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
   }
 
   /** Set the replication factor of this file. */

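(For context, a minimal sketch of how the refactored HeaderFormat packs and unpacks the 64-bit header; the values and local variables below are illustrative and not part of this patch:)

    // Sketch only: pack a 128 MB preferred block size and replication 3
    // into the header long, then read both fields back.
    long header = HeaderFormat.toLong(128L * 1024 * 1024, (short) 3);
    short replication = HeaderFormat.getReplication(header);      // 3
    long blockSize = HeaderFormat.getPreferredBlockSize(header);  // 134217728
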
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java?rev=1609401&r1=1609400&r2=1609401&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java Thu Jul 10 06:16:34 2014
@@ -48,9 +48,7 @@ public interface INodeFileAttributes ext
         short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
       super(name, permissions, aclFeature, modificationTime, accessTime, 
           xAttrsFeature);
-
-      final long h = HeaderFormat.combineReplication(0L, replication);
-      header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);
+      header = HeaderFormat.toLong(preferredBlockSize, replication);
     }
 
     public SnapshotCopy(INodeFile file) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java?rev=1609401&r1=1609400&r2=1609401&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java Thu Jul 10 06:16:34 2014
@@ -21,9 +21,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.base.Preconditions;
@@ -36,26 +35,28 @@ import com.google.common.base.Preconditi
 public abstract class INodeWithAdditionalFields extends INode
     implements LinkedElement {
   static enum PermissionStatusFormat {
-    MODE(0, 16),
-    GROUP(MODE.OFFSET + MODE.LENGTH, 25),
-    USER(GROUP.OFFSET + GROUP.LENGTH, 23);
+    MODE(null, 16),
+    GROUP(MODE.BITS, 25),
+    USER(GROUP.BITS, 23);
 
-    final int OFFSET;
-    final int LENGTH; //bit length
-    final long MASK;
+    final LongBitFormat BITS;
 
-    PermissionStatusFormat(int offset, int length) {
-      OFFSET = offset;
-      LENGTH = length;
-      MASK = ((-1L) >>> (64 - LENGTH)) << OFFSET;
+    private PermissionStatusFormat(LongBitFormat previous, int length) {
+      BITS = new LongBitFormat(name(), previous, length, 0);
     }
 
-    long retrieve(long record) {
-      return (record & MASK) >>> OFFSET;
+    static String getUser(long permission) {
+      final int n = (int)USER.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getUser(n);
     }
 
-    long combine(long bits, long record) {
-      return (record & ~MASK) | (bits << OFFSET);
+    static String getGroup(long permission) {
+      final int n = (int)GROUP.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getGroup(n);
+    }
+    
+    static short getMode(long permission) {
+      return (short)MODE.BITS.retrieve(permission);
     }
 
     /** Encode the {@link PermissionStatus} to a long. */
@@ -63,12 +64,12 @@ public abstract class INodeWithAdditiona
       long permission = 0L;
       final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
           ps.getUserName());
-      permission = USER.combine(user, permission);
+      permission = USER.BITS.combine(user, permission);
       final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
           ps.getGroupName());
-      permission = GROUP.combine(group, permission);
+      permission = GROUP.BITS.combine(group, permission);
       final int mode = ps.getPermission().toShort();
-      permission = MODE.combine(mode, permission);
+      permission = MODE.BITS.combine(mode, permission);
       return permission;
     }
   }
@@ -162,7 +163,7 @@ public abstract class INodeWithAdditiona
   }
 
   private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
-    this.permission = f.combine(n, permission);
+    this.permission = f.BITS.combine(n, permission);
   }
 
   @Override
@@ -170,9 +171,7 @@ public abstract class INodeWithAdditiona
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getUserName();
     }
-
-    int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getUser(n);
+    return PermissionStatusFormat.getUser(permission);
   }
 
   @Override
@@ -186,9 +185,7 @@ public abstract class INodeWithAdditiona
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getGroupName();
     }
-
-    int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getGroup(n);
+    return PermissionStatusFormat.getGroup(permission);
   }
 
   @Override
@@ -208,7 +205,7 @@ public abstract class INodeWithAdditiona
 
   @Override
   public final short getFsPermissionShort() {
-    return (short)PermissionStatusFormat.MODE.retrieve(permission);
+    return PermissionStatusFormat.getMode(permission);
   }
   @Override
   void setPermission(FsPermission permission) {

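(A minimal sketch of the round trip through the refactored PermissionStatusFormat; the user and group names below are illustrative, not part of this patch:)

    // Sketch only: encode a PermissionStatus into the packed long field
    // and read the pieces back through the new static helpers.
    PermissionStatus ps = new PermissionStatus("alice", "staff",
        FsPermission.getDefault());
    long packed = PermissionStatusFormat.toLong(ps);
    String user = PermissionStatusFormat.getUser(packed);    // "alice"
    String group = PermissionStatusFormat.getGroup(packed);  // "staff"
    short mode = PermissionStatusFormat.getMode(packed);     // default mode bits
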
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java?rev=1609401&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java Thu Jul 10 06:16:34 2014
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.io.Serializable;
+
+
+/**
+ * Bit format in a long.
+ */
+public class LongBitFormat implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  private final String NAME;
+  /** Bit offset */
+  private final int OFFSET;
+  /** Bit length */
+  private final int LENGTH;
+  /** Minimum value */
+  private final long MIN;
+  /** Maximum value */
+  private final long MAX;
+  /** Bit mask */
+  private final long MASK;
+
+  public LongBitFormat(String name, LongBitFormat previous, int length, long min) {
+    NAME = name;
+    OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH;
+    LENGTH = length;
+    MIN = min;
+    MAX = ((-1L) >>> (64 - LENGTH));
+    MASK = MAX << OFFSET;
+  }
+
+  /** Retrieve the value from the record. */
+  public long retrieve(long record) {
+    return (record & MASK) >>> OFFSET;
+  }
+
+  /** Combine the value to the record. */
+  public long combine(long value, long record) {
+    if (value < MIN) {
+      throw new IllegalArgumentException(
+          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+    }
+    if (value > MAX) {
+      throw new IllegalArgumentException(
+          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+    }
+    return (record & ~MASK) | (value << OFFSET);
+  }
+}

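(The new LongBitFormat is a small reusable helper for packing several fields into one long; a minimal sketch of chaining two formats, with illustrative field names that are not part of this patch:)

    // Sketch only: two adjacent fields in one long. The second field starts
    // where the first ends because it is chained via the 'previous' argument.
    LongBitFormat low = new LongBitFormat("LOW", null, 48, 1);
    LongBitFormat high = new LongBitFormat("HIGH", low, 16, 1);
    long record = 0L;
    record = low.combine(4096L, record);     // occupies bits 0..47
    record = high.combine(3L, record);       // occupies bits 48..63
    long lowValue = low.retrieve(record);    // 4096
    long highValue = high.retrieve(record);  // 3
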
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1609401&r1=1609400&r2=1609401&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Thu Jul 10 06:16:34 2014
@@ -77,7 +77,7 @@ public class TestINodeFile {
   private final PermissionStatus perm = new PermissionStatus(
       "userName", null, FsPermission.getDefault());
   private short replication;
-  private long preferredBlockSize;
+  private long preferredBlockSize = 1024;
 
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
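(Note: HeaderFormat now declares a minimum of 1 for both PREFERRED_BLOCK_SIZE and REPLICATION, so LongBitFormat.combine rejects a zero block size that the old combinePreferredBlockSize accepted; presumably that is why the test field gains an explicit initial value. A sketch of what the old default of 0 would now trigger:)

    // Sketch only: a zero preferred block size is below the declared minimum,
    // so LongBitFormat.combine throws IllegalArgumentException.
    long h = HeaderFormat.toLong(0L, (short) 1);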