Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2013/09/06 21:09:42 UTC

svn commit: r1520669 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/util/ src/test/java/org/apache/hadoop/hdfs/util/

Author: cmccabe
Date: Fri Sep  6 19:09:41 2013
New Revision: 1520669

URL: http://svn.apache.org/r1520669
Log:
HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs (Contributed by Todd Lipcon)
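
A note on the motivation: an ArrayList of N elements is backed by a single
contiguous Object[] of N slots, and under the CMS collector a multi-megabyte
allocation can fail in a fragmented old generation, forcing a stop-the-world
full GC to compact the heap. ChunkedArrayList sidesteps this by backing the
list with many small arrays instead. The change is a drop-in swap at the call
sites that can grow huge during bulk deletes; in sketch form:

    // was: List<INode> removedINodes = new ArrayList<INode>();
    List<INode> removedINodes = new ChunkedArrayList<INode>();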

Added:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1520669&r1=1520668&r2=1520669&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Sep  6 19:09:41 2013
@@ -31,6 +31,9 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-4491. Parallel testing HDFS. (Andrey Klochkov via cnauroth)
 
+    HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs
+    (Todd Lipcon via Colin Patrick McCabe)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1520669&r1=1520668&r2=1520669&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Sep  6 19:09:41 2013
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
 import org.apache.hadoop.hdfs.util.ByteArray;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -949,7 +950,7 @@ public class FSDirectory implements Clos
         if (removedDst != null) {
           undoRemoveDst = false;
           BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-          List<INode> removedINodes = new ArrayList<INode>();
+          List<INode> removedINodes = new ChunkedArrayList<INode>();
           filesDeleted = removedDst.cleanSubtree(null,
               dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
               .get(Quota.NAMESPACE);
@@ -1363,7 +1364,7 @@ public class FSDirectory implements Clos
       QuotaExceededException, SnapshotAccessControlException {
     assert hasWriteLock();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    List<INode> removedINodes = new ArrayList<INode>();
+    List<INode> removedINodes = new ChunkedArrayList<INode>();
 
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1520669&r1=1520668&r2=1520669&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Sep  6 19:09:41 2013
@@ -23,7 +23,6 @@ import java.io.File;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumMap;
 import java.util.List;
@@ -77,6 +76,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.Holder;
 
 import com.google.common.base.Joiner;
@@ -584,7 +584,7 @@ public class FSEditLogLoader {
     case OP_DELETE_SNAPSHOT: {
       DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-      List<INode> removedINodes = new ArrayList<INode>();
+      List<INode> removedINodes = new ChunkedArrayList<INode>();
       fsNamesys.getSnapshotManager().deleteSnapshot(
           deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName,
           collectedBlocks, removedINodes);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1520669&r1=1520668&r2=1520669&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Sep  6 19:09:41 2013
@@ -201,6 +201,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetryCache;
@@ -3113,7 +3114,7 @@ public class FSNamesystem implements Nam
       throws AccessControlException, SafeModeException, UnresolvedLinkException,
              IOException {
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    List<INode> removedINodes = new ArrayList<INode>();
+    List<INode> removedINodes = new ChunkedArrayList<INode>();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -3167,21 +3168,17 @@ public class FSNamesystem implements Nam
    *          of blocks that need to be removed from blocksMap
    */
   void removeBlocks(BlocksMapUpdateInfo blocks) {
-    int start = 0;
-    int end = 0;
     List<Block> toDeleteList = blocks.getToDeleteList();
-    while (start < toDeleteList.size()) {
-      end = BLOCK_DELETION_INCREMENT + start;
-      end = end > toDeleteList.size() ? toDeleteList.size() : end;
+    Iterator<Block> iter = toDeleteList.iterator();
+    while (iter.hasNext()) {
       writeLock();
       try {
-        for (int i = start; i < end; i++) {
-          blockManager.removeBlock(toDeleteList.get(i));
+        for (int i = 0; i < BLOCK_DELETION_INCREMENT && iter.hasNext(); i++) {
+          blockManager.removeBlock(iter.next());
         }
       } finally {
         writeUnlock();
       }
-      start = end;
     }
   }
   
@@ -6761,7 +6758,7 @@ public class FSNamesystem implements Nam
       checkOwner(pc, snapshotRoot);
 
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-      List<INode> removedINodes = new ArrayList<INode>();
+      List<INode> removedINodes = new ChunkedArrayList<INode>();
       dir.writeLock();
       try {
         snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
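
The removeBlocks() rewrite above is forced by ChunkedArrayList itself: the
class does not support get(int), so the old index-windowed loop becomes
iterator-based. The locking behavior is unchanged -- the namesystem write lock
is still released after every BLOCK_DELETION_INCREMENT deletions so other
operations can make progress between batches. A self-contained sketch of the
same pattern (the lock, batch size, and delete() below are stand-ins, not the
FSNamesystem internals):

    import java.util.Iterator;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class BatchedDeleter<T> {
      // Assumed batch size; FSNamesystem uses BLOCK_DELETION_INCREMENT here.
      private static final int DELETION_INCREMENT = 1000;
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      void deleteAll(List<T> toDelete) {
        Iterator<T> iter = toDelete.iterator();
        while (iter.hasNext()) {
          lock.writeLock().lock();   // reacquire for each batch...
          try {
            for (int i = 0; i < DELETION_INCREMENT && iter.hasNext(); i++) {
              delete(iter.next());
            }
          } finally {
            lock.writeLock().unlock();  // ...so other writers can interleave
          }
        }
      }

      void delete(T item) {
        // placeholder for blockManager.removeBlock(block)
      }
    }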

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1520669&r1=1520668&r2=1520669&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Fri Sep  6 19:09:41 2013
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.util.StringUtils;
 
@@ -707,7 +708,7 @@ public abstract class INode implements I
     }
     
     public BlocksMapUpdateInfo() {
-      toDeleteList = new ArrayList<Block>();
+      toDeleteList = new ChunkedArrayList<Block>();
     }
     
     /**

Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java?rev=1520669&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java Fri Sep  6 19:09:41 2013
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.util.AbstractList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+/**
+ * Simplified List implementation which stores elements as a list
+ * of chunks, each chunk having a maximum size. This improves over
+ * using an ArrayList in that creating a large list will never require
+ * a large amount of contiguous heap space -- thus reducing the likelihood
+ * of triggering a CMS compaction pause due to heap fragmentation.
+ * 
+ * The first chunks allocated are small, but each additional chunk is
+ * 50% larger than the previous, ramping up to a configurable maximum
+ * chunk size. Reasonable defaults are provided which should strike a
+ * good balance between avoiding any large allocations and retaining
+ * decent performance.
+ *
+ * This currently only supports a small subset of List operations --
+ * namely addition and iteration.
+ */
+@InterfaceAudience.Private
+public class ChunkedArrayList<T> extends AbstractList<T> {
+
+  /**
+   * The chunks which make up the full list.
+   */
+  private final List<List<T>> chunks = Lists.newArrayList();
+  
+  /**
+   * Cache of the last element in the 'chunks' array above.
+   * This speeds up the add operation measurably.
+   */
+  private List<T> lastChunk = null;
+
+  /**
+   * The capacity with which the last chunk was allocated.
+   */
+  private int lastChunkCapacity;
+  
+  /**
+   * The capacity of the first chunk to allocate in a cleared list.
+   */
+  private final int initialChunkCapacity;
+  
+  /**
+   * The maximum number of elements for any chunk.
+   */
+  private final int maxChunkSize;
+
+  /**
+   * Total number of elements in the list.
+   */
+  private int size;
+  
+  /**
+   * Default initial chunk capacity is 6 elements: six 8-byte references
+   * plus the array object header come to roughly 64 bytes, a typical
+   * minimum object size.
+   */
+  private static final int DEFAULT_INITIAL_CHUNK_CAPACITY = 6;
+  
+  /**
+   * Default max size is 8K elements - which, at 8 bytes per element
+   * should be about 64KB -- small enough to easily fit in contiguous
+   * free heap space even with a fair amount of fragmentation.
+   */
+  private static final int DEFAULT_MAX_CHUNK_SIZE = 8*1024;
+  
+
+  public ChunkedArrayList() {
+    this(DEFAULT_INITIAL_CHUNK_CAPACITY, DEFAULT_MAX_CHUNK_SIZE);
+  }
+
+  /**
+   * @param initialChunkCapacity the capacity of the first chunk to be
+   * allocated
+   * @param maxChunkSize the maximum size of any chunk allocated
+   */
+  public ChunkedArrayList(int initialChunkCapacity, int maxChunkSize) {
+    Preconditions.checkArgument(maxChunkSize >= initialChunkCapacity);
+    this.initialChunkCapacity = initialChunkCapacity;
+    this.maxChunkSize = maxChunkSize;
+  }
+
+  @Override
+  public Iterator<T> iterator() {
+    return Iterables.concat(chunks).iterator();
+  }
+
+  @Override
+  public boolean add(T e) {
+    if (lastChunk == null) {
+      addChunk(initialChunkCapacity);
+    } else if (lastChunk.size() >= lastChunkCapacity) {
+      int newCapacity = lastChunkCapacity + (lastChunkCapacity >> 1);
+      addChunk(Math.min(newCapacity, maxChunkSize));
+    }
+    size++;
+    return lastChunk.add(e);
+  }
+
+  @Override
+  public void clear() {
+    chunks.clear();
+    lastChunk = null;
+    lastChunkCapacity = 0;
+    size = 0;
+  }
+  
+  private void addChunk(int capacity) {
+    lastChunk = Lists.newArrayListWithCapacity(capacity);
+    chunks.add(lastChunk);
+    lastChunkCapacity = capacity;
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return size == 0;
+  }
+
+  @Override
+  public int size() {
+    return size;
+  }
+  
+  @VisibleForTesting
+  int getNumChunks() {
+    return chunks.size();
+  }
+  
+  @VisibleForTesting
+  int getMaxChunkSize() {
+    int size = 0;
+    for (List<T> chunk : chunks) {
+      size = Math.max(size, chunk.size());
+    }
+    return size;
+  }
+
+  @Override
+  public T get(int arg0) {
+    throw new UnsupportedOperationException(
+        this.getClass().getName() + " does not support random access");
+  }
+}
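
To make the growth policy concrete: with the defaults above, chunk capacities
ramp 6, 9, 13, 19, 28, ... (each new chunk gets the previous capacity plus
half of it, via lastChunkCapacity + (lastChunkCapacity >> 1)), capped at 8192
elements. Each chunk fills completely before the next is allocated, so no
single backing array ever exceeds about 64KB of references. A brief usage
sketch (only add, iterate, size, isEmpty, and clear are supported; random
access throws):

    ChunkedArrayList<Long> blockIds = new ChunkedArrayList<Long>();
    for (long i = 0; i < 1000000; i++) {
      blockIds.add(i);
    }
    long sum = 0;
    for (long id : blockIds) {  // iteration walks the chunks in order
      sum += id;
    }
    // blockIds.get(0);  // would throw UnsupportedOperationException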

Added: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java?rev=1520669&view=auto
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java (added)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java Fri Sep  6 19:09:41 2013
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+
+import org.junit.Test;
+
+import com.google.common.base.Stopwatch;
+
+public class TestChunkedArrayList {
+
+  @Test
+  public void testBasics() {
+    final int N_ELEMS = 100000;
+    ChunkedArrayList<Integer> l = new ChunkedArrayList<Integer>();
+    assertTrue(l.isEmpty());
+    // Insert a bunch of elements.
+    for (int i = 0; i < N_ELEMS; i++) {
+      l.add(i);
+    }
+    assertFalse(l.isEmpty());
+    assertEquals(N_ELEMS, l.size());
+
+    // Check that it got chunked.
+    assertTrue(l.getNumChunks() > 10);
+    assertEquals(8192, l.getMaxChunkSize());
+  }
+  
+  @Test
+  public void testIterator() {
+    ChunkedArrayList<Integer> l = new ChunkedArrayList<Integer>();
+    for (int i = 0; i < 30000; i++) {
+      l.add(i);
+    }
+    
+    int i = 0;
+    for (int fromList : l) {
+      assertEquals(i, fromList);
+      i++;
+    }
+  }
+  
+  @Test
+  public void testPerformance() {
+    String obj = "hello world";
+    
+    final int numElems = 1000000;
+    final int numTrials = 5;
+    
+    for (int trial = 0; trial < numTrials; trial++) {
+      System.gc();
+      {
+        ArrayList<String> arrayList = new ArrayList<String>();
+        Stopwatch sw = new Stopwatch();
+        sw.start();
+        for (int i = 0; i < numElems; i++) {
+          arrayList.add(obj);
+        }
+        System.out.println("       ArrayList " + sw.elapsedMillis());
+      }
+      
+      // test ChunkedArrayList
+      System.gc();
+      {
+        ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
+        Stopwatch sw = new Stopwatch();
+        sw.start();
+        for (int i = 0; i < numElems; i++) {
+          chunkedList.add(obj);
+        }
+        System.out.println("ChunkedArrayList " + sw.elapsedMillis());
+      }
+    }
+  }
+}
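
As a sanity check on the assertions in testBasics(): under the default growth
policy, 100,000 adds produce chunk capacities 6, 9, 13, ..., 8092, then 8192
thereafter. The first 19 chunks hold 24,274 elements and the remainder spills
into ten 8192-capacity chunks, 29 chunks in total -- hence the test can assert
getNumChunks() > 10 and getMaxChunkSize() == 8192. A snippet reproducing the
arithmetic (it restates the growth rule rather than calling into the class):

    int capacity = 6;      // DEFAULT_INITIAL_CHUNK_CAPACITY
    int placed = 0, chunks = 0;
    while (placed < 100000) {
      placed += capacity;  // each chunk fills before the next is allocated
      chunks++;
      capacity = Math.min(capacity + (capacity >> 1), 8192);
    }
    System.out.println(chunks);  // prints 29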