You are viewing a plain text version of this content. The canonical link for it is here.
Posted to oak-commits@jackrabbit.apache.org by md...@apache.org on 2018/10/22 12:26:14 UTC

svn commit: r1844549 - in /jackrabbit/oak/trunk/oak-segment-tar/src: main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java

Author: mduerig
Date: Mon Oct 22 12:26:14 2018
New Revision: 1844549

URL: http://svn.apache.org/viewvc?rev=1844549&view=rev
Log:
OAK-7853: SegmentBufferWriter not flushed after OnRC
Added a test case and a fix that flushes SBW instances disposed after OnRC

Modified:
    jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java
    jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java

Modified: jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java?rev=1844549&r1=1844548&r2=1844549&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java (original)
+++ jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java Mon Oct 22 12:26:14 2018
@@ -67,6 +67,11 @@ public class SegmentBufferWriterPool imp
      */
     private final Set<SegmentBufferWriter> disposed = newHashSet();
 
+    /**
+     * Retired writers from a previous GC generation that have not yet been flushed
+     */
+    private final Set<SegmentBufferWriter> disposedOldGen = newHashSet();
+
     @NotNull
     private final SegmentIdProvider idProvider;
 
@@ -115,6 +120,11 @@ public class SegmentBufferWriterPool imp
             toFlush.addAll(writers.values());
             writers.clear();
 
+            // Collect all writers from old GC generations that
+            // have been disposed
+            toFlush.addAll(disposedOldGen);
+            disposedOldGen.clear();
+
             // Collect all borrowed writers, which we need to wait for.
             // Clear the list so they will get disposed once returned.
             toReturn.addAll(borrowed);
@@ -191,7 +201,7 @@ public class SegmentBufferWriterPool imp
                         gcGeneration.get()
                 );
             } else if (!writer.getGCGeneration().equals(gcGeneration.get())) {
-                disposed.add(writer);
+                disposedOldGen.add(writer);
                 writer = new SegmentBufferWriter(
                         idProvider,
                         reader,

Modified: jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java?rev=1844549&r1=1844548&r2=1844549&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java (original)
+++ jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java Mon Oct 22 12:26:14 2018
@@ -18,21 +18,32 @@
  */
 package org.apache.jackrabbit.oak.segment.file;
 
+import static com.google.common.collect.Maps.newLinkedHashMap;
+import static org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
 import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 
 import java.io.ByteArrayInputStream;
 import java.io.File;
+import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Random;
+import java.util.concurrent.CountDownLatch;
 
 import org.apache.jackrabbit.oak.api.Blob;
 import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob;
+import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
+import org.apache.jackrabbit.oak.segment.DefaultSegmentWriter;
 import org.apache.jackrabbit.oak.segment.RecordId;
 import org.apache.jackrabbit.oak.segment.SegmentNodeBuilder;
 import org.apache.jackrabbit.oak.segment.SegmentNodeState;
 import org.apache.jackrabbit.oak.segment.SegmentTestConstants;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -173,4 +184,57 @@ public class FileStoreIT {
         }
     }
 
+    @Test
+    public void snfeAfterOnRC()
+    throws IOException, InvalidFileStoreVersionException, InterruptedException {
+        Map<String, String> roots = newLinkedHashMap();
+        try (FileStore rwStore = fileStoreBuilder(getFileStoreFolder()).build()) {
+
+            // Block scheduled journal updates
+            CountDownLatch blockJournalUpdates = new CountDownLatch(1);
+
+            // Ensure we have a non-empty journal
+            rwStore.flush();
+
+            // Add a revision
+            roots.putIfAbsent(addNode(rwStore, "g"), "g");
+
+            // Simulate compaction by writing a new head state of the next generation
+            SegmentNodeState base = rwStore.getHead();
+            GCGeneration gcGeneration = base.getRecordId().getSegmentId().getGcGeneration();
+            DefaultSegmentWriter nextGenerationWriter = defaultSegmentWriterBuilder("c")
+                    .withGeneration(gcGeneration.nextFull())
+                    .build(rwStore);
+            RecordId headId = nextGenerationWriter.writeNode(EmptyNodeState.EMPTY_NODE);
+            rwStore.getRevisions().setHead(base.getRecordId(), headId);
+
+            // Add another revision
+            roots.putIfAbsent(addNode(rwStore, "g"), "g");
+            blockJournalUpdates.countDown();
+        }
+
+        // Open the store again in read only mode and check all revisions.
+        // This simulates accessing the store after an unclean shutdown.
+        try (ReadOnlyFileStore roStore = fileStoreBuilder(getFileStoreFolder()).buildReadOnly()) {
+            for (Entry<String, String> revision : roots.entrySet()) {
+                roStore.setRevision(revision.getKey());
+                checkNode(roStore.getHead());
+            }
+        }
+    }
+
+    private static String addNode(FileStore store, String name) throws InterruptedException {
+        SegmentNodeState base = store.getHead();
+        SegmentNodeBuilder builder = base.builder();
+        builder.setChildNode(name);
+        store.getRevisions().setHead(base.getRecordId(), builder.getNodeState().getRecordId());
+        return store.getRevisions().getHead().toString();
+    }
+
+    private static void checkNode(NodeState node) {
+        for (ChildNodeEntry cne : node.getChildNodeEntries()) {
+            checkNode(cne.getNodeState());
+        }
+    }
+
 }