You are viewing a plain text version of this content. The canonical link for it is here.
Posted to oak-commits@jackrabbit.apache.org by md...@apache.org on 2019/02/18 16:16:08 UTC
svn commit: r1853813 - in /jackrabbit/oak/branches/1.8: ./
oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java
oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
Author: mduerig
Date: Mon Feb 18 16:16:08 2019
New Revision: 1853813
URL: http://svn.apache.org/viewvc?rev=1853813&view=rev
Log:
OAK-8033: Node states sometimes refer to more than a single generation of segments after a full compaction
Merged revision 1853429
Modified:
jackrabbit/oak/branches/1.8/ (props changed)
jackrabbit/oak/branches/1.8/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java
jackrabbit/oak/branches/1.8/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
Propchange: jackrabbit/oak/branches/1.8/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Feb 18 16:16:08 2019
@@ -1,3 +1,3 @@
/jackrabbit/oak/branches/1.0:1665962
-/jackrabbit/oak/trunk:1820660-1820661,1820729,1820734,1820859,1820861,1820878,1820888,1820947,1821027,1821130,1821140-1821141,1821178,1821237,1821240,1821249,1821258,1821325,1821358,1821361-1821362,1821370,1821375,1821393,1821477,1821487,1821516,1821617,1821663,1821665,1821668,1821681,1821847,1821975-1821983,1822121,1822201,1822207,1822527,1822642,1822723,1822808,1822850,1822934,1823135,1823163,1823169,1823172,1823655,1823669,1824196,1824198,1824253,1824255,1824896,1824962,1825065,1825362,1825381,1825442,1825448,1825466,1825470-1825471,1825475,1825523,1825525,1825561,1825619-1825621,1825651,1825654,1825992,1826079,1826090,1826096,1826216,1826237,1826338,1826516,1826532,1826551,1826560,1826638,1826640,1826730,1826833,1826932,1826957,1827423,1827472,1827486,1827816,1827977,1828349,1828439,1828502,1828529,1828948,1829527,1829534,1829546,1829569,1829587,1829665,1829854,1829864,1829978,1829985,1829987,1829998,1830019,1830048,1830160,1830171,1830197,1830209,1830239,1830347,1830748,1830911
,1830923,1831157-1831158,1831163,1831190,1831374,1831560,1831689,1832258,1832376,1832379,1832535,1833308,1833347,1833833,1834112,1834117,1834287,1834291,1834302,1834326,1834328,1834336,1834428,1834468,1834483,1834610,1834648-1834649,1834681,1834823,1834857-1834858,1835060,1835518,1835521,1835635,1835642,1835780,1835819,1836082,1836121,1836167-1836168,1836170-1836187,1836189-1836196,1836206,1836487,1836493,1836548,1837057,1837274,1837296,1837326,1837475,1837503,1837547,1837569,1837600,1837657,1837718,1837998,1838076,1838637,1839549,1839570,1839637,1839746,1840019,1840024,1840031,1840226,1840455,1840462,1840574,1840769,1841314,1841352,1842089,1842677,1843175,1843222,1843231,1843398,1843618,1843652,1843911,1844325,1844549,1844625,1844627,1844642,1844728,1844775,1844932,1845135,1845336,1845405,1845415,1845730-1845731,1845863,1845865,1846057,1846396,1846429,1846617,1848073,1848181-1848182,1848191,1848217,1848822-1848823,1850837,1851533-1851535,1851619,1852120,1852451,1852492,1853393,1853
433
+/jackrabbit/oak/trunk:1820660-1820661,1820729,1820734,1820859,1820861,1820878,1820888,1820947,1821027,1821130,1821140-1821141,1821178,1821237,1821240,1821249,1821258,1821325,1821358,1821361-1821362,1821370,1821375,1821393,1821477,1821487,1821516,1821617,1821663,1821665,1821668,1821681,1821847,1821975-1821983,1822121,1822201,1822207,1822527,1822642,1822723,1822808,1822850,1822934,1823135,1823163,1823169,1823172,1823655,1823669,1824196,1824198,1824253,1824255,1824896,1824962,1825065,1825362,1825381,1825442,1825448,1825466,1825470-1825471,1825475,1825523,1825525,1825561,1825619-1825621,1825651,1825654,1825992,1826079,1826090,1826096,1826216,1826237,1826338,1826516,1826532,1826551,1826560,1826638,1826640,1826730,1826833,1826932,1826957,1827423,1827472,1827486,1827816,1827977,1828349,1828439,1828502,1828529,1828948,1829527,1829534,1829546,1829569,1829587,1829665,1829854,1829864,1829978,1829985,1829987,1829998,1830019,1830048,1830160,1830171,1830197,1830209,1830239,1830347,1830748,1830911
,1830923,1831157-1831158,1831163,1831190,1831374,1831560,1831689,1832258,1832376,1832379,1832535,1833308,1833347,1833833,1834112,1834117,1834287,1834291,1834302,1834326,1834328,1834336,1834428,1834468,1834483,1834610,1834648-1834649,1834681,1834823,1834857-1834858,1835060,1835518,1835521,1835635,1835642,1835780,1835819,1836082,1836121,1836167-1836168,1836170-1836187,1836189-1836196,1836206,1836487,1836493,1836548,1837057,1837274,1837296,1837326,1837475,1837503,1837547,1837569,1837600,1837657,1837718,1837998,1838076,1838637,1839549,1839570,1839637,1839746,1840019,1840024,1840031,1840226,1840455,1840462,1840574,1840769,1841314,1841352,1842089,1842677,1843175,1843222,1843231,1843398,1843618,1843652,1843911,1844325,1844549,1844625,1844627,1844642,1844728,1844775,1844932,1845135,1845336,1845405,1845415,1845730-1845731,1845863,1845865,1846057,1846396,1846429,1846617,1848073,1848181-1848182,1848191,1848217,1848822-1848823,1850837,1851533-1851535,1851619,1852120,1852451,1852492,1853393,1853
429,1853433
/jackrabbit/trunk:1345480
Modified: jackrabbit/oak/branches/1.8/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.8/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java?rev=1853813&r1=1853812&r2=1853813&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.8/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java (original)
+++ jackrabbit/oak/branches/1.8/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterPool.java Mon Feb 18 16:16:08 2019
@@ -110,7 +110,7 @@ public class SegmentBufferWriterPool imp
@NotNull WriteOperation writeOperation)
throws IOException {
SimpleImmutableEntry<?,?> key = new SimpleImmutableEntry<>(currentThread(), gcGeneration);
- SegmentBufferWriter writer = borrowWriter(key);
+ SegmentBufferWriter writer = borrowWriter(key, gcGeneration);
try {
return writeOperation.execute(writer);
} finally {
@@ -199,7 +199,7 @@ public class SegmentBufferWriterPool imp
* a fresh writer at any time. Callers need to return a writer before
* borrowing it again. Failing to do so leads to undefined behaviour.
*/
- private SegmentBufferWriter borrowWriter(Object key) {
+ private SegmentBufferWriter borrowWriter(@NotNull Object key, @NotNull GCGeneration gcGeneration) {
poolMonitor.enter();
try {
SegmentBufferWriter writer = writers.remove(key);
@@ -208,7 +208,7 @@ public class SegmentBufferWriterPool imp
idProvider,
reader,
getWriterId(wid),
- gcGeneration.get()
+ gcGeneration
);
}
borrowed.add(writer);
Modified: jackrabbit/oak/branches/1.8/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.8/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java?rev=1853813&r1=1853812&r2=1853813&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.8/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java (original)
+++ jackrabbit/oak/branches/1.8/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java Mon Feb 18 16:16:08 2019
@@ -21,6 +21,7 @@ package org.apache.jackrabbit.oak.segmen
import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Sets.newHashSet;
+import static com.google.common.util.concurrent.Uninterruptibles.awaitUninterruptibly;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.lang.Integer.getInteger;
import static java.lang.String.valueOf;
@@ -51,6 +52,7 @@ import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
@@ -59,6 +61,7 @@ import java.util.concurrent.ScheduledExe
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
import com.google.common.io.ByteStreams;
import org.apache.jackrabbit.oak.api.Blob;
@@ -68,6 +71,7 @@ import org.apache.jackrabbit.oak.api.Typ
import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser;
import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
+import org.apache.jackrabbit.oak.plugins.memory.StringPropertyState;
import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreGCMonitor;
@@ -648,37 +652,36 @@ public class CompactionAndCleanupIT {
.withMaxFileSize(2)
.withMemoryMapping(true)
.build();
- final SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
- final AtomicBoolean compactionSuccess = new AtomicBoolean(true);
+ SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
+ AtomicBoolean compactionSuccess = new AtomicBoolean(true);
NodeBuilder root = nodeStore.getRoot().builder();
createNodes(root.setChildNode("test"), 10, 3);
nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
- final Set<UUID> beforeSegments = new HashSet<UUID>();
- collectSegments(store.getReader(), store.getRevisions(), beforeSegments);
+ Set<UUID> beforeSegments = new HashSet<UUID>();
+ collectSegments(store.getReader(), store.getHead().getRecordId(),
+ segmentId -> beforeSegments.add(segmentId.asUUID()));
+
final AtomicReference<Boolean> run = new AtomicReference<Boolean>(true);
- final List<String> failedCommits = newArrayList();
+ final List<Exception> failedCommits = newArrayList();
Thread[] threads = new Thread[10];
for (int k = 0; k < threads.length; k++) {
final int threadId = k;
- threads[k] = new Thread(new Runnable() {
- @Override
- public void run() {
- for (int j = 0; run.get(); j++) {
- String nodeName = "b-" + threadId + "," + j;
- try {
- NodeBuilder root = nodeStore.getRoot().builder();
- root.setChildNode(nodeName);
- nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
- Thread.sleep(5);
- } catch (CommitFailedException e) {
- failedCommits.add(nodeName);
- } catch (InterruptedException e) {
- Thread.interrupted();
- break;
- }
+ threads[k] = new Thread(() -> {
+ for (int j = 0; run.get(); j++) {
+ String nodeName = "b-" + threadId + "," + j;
+ try {
+ NodeBuilder changes = nodeStore.getRoot().builder();
+ changes.setChildNode(nodeName);
+ nodeStore.merge(changes, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+ Thread.sleep(5);
+ } catch (InterruptedException e) {
+ Thread.interrupted();
+ break;
+ } catch (Exception e) {
+ failedCommits.add(new ExecutionException("Failed commit " + nodeName, e));
}
}
});
@@ -692,10 +695,13 @@ public class CompactionAndCleanupIT {
store.flush();
assumeTrue("Failed to acquire compaction lock", compactionSuccess.get());
- assertTrue("Failed commits: " + failedCommits, failedCommits.isEmpty());
+ for (Exception failedCommit : failedCommits) {
+ throw new Exception("A background commit failed", failedCommit);
+ }
Set<UUID> afterSegments = new HashSet<UUID>();
- collectSegments(store.getReader(), store.getRevisions(), afterSegments);
+ collectSegments(store.getReader(), store.getHead().getRecordId(),
+ segmentId -> afterSegments.add(segmentId.asUUID()));
try {
for (UUID u : beforeSegments) {
assertFalse("Mixed segments found: " + u, afterSegments.contains(u));
@@ -705,6 +711,51 @@ public class CompactionAndCleanupIT {
}
}
+ @Test
+ public void testMixedSegmentsGCGeneration() throws Exception {
+ try (FileStore store = fileStoreBuilder(getFileStoreFolder())
+ .withMaxFileSize(2)
+ .withMemoryMapping(true)
+ .build()) {
+
+ CountDownLatch readyToCompact = new CountDownLatch(1);
+ CountDownLatch compactionCompleted = new CountDownLatch(1);
+ SegmentNodeBuilder changes = store.getHead().builder();
+ changes.setProperty("a", "a");
+ changes.setProperty(new StringPropertyState("b", "b") {
+ @Override
+ public String getValue() {
+ readyToCompact.countDown();
+ awaitUninterruptibly(compactionCompleted);
+ return super.getValue();
+ }
+ });
+
+ // Overlap an ongoing write operation triggered by the call to getNodeState
+ // with a full compaction. This should not cause the written node state
+ // to reference segments from multiple generations
+ FutureTask<SegmentNodeState> futureNodeState = runAsync(changes::getNodeState);
+ readyToCompact.await();
+
+ store.compactFull();
+ compactionCompleted.countDown();
+
+ // The node state from the write operation that started before
+ // compaction completed should reference only segments from generation 0.
+ SegmentNodeState gen0NodeState = futureNodeState.get();
+ collectSegments(store.getReader(), gen0NodeState.getRecordId(),
+ segmentId -> assertEquals("Full generation should be 0",
+ 0, segmentId.getGcGeneration().getFullGeneration()));
+
+ // Retrieving the node state again should trigger a rewriting to
+ // the next generation (1).
+ SegmentNodeState gen1NodeState = changes.getNodeState();
+ collectSegments(store.getReader(), gen1NodeState.getRecordId(),
+ segmentId -> assertEquals("Full generation should be 1",
+ 1, segmentId.getGcGeneration().getFullGeneration()));
+ }
+ }
+
/**
* Set a root node referring to a child node that lives in a different segment. Depending
* on the order how the SegmentBufferWriters associated with the threads used to create the
@@ -839,81 +890,80 @@ public class CompactionAndCleanupIT {
}
}
- private static void collectSegments(SegmentReader reader, Revisions revisions,
- final Set<UUID> segmentIds) {
+ private static void collectSegments(SegmentReader reader, RecordId headId, Consumer<SegmentId> onSegment) {
new SegmentParser(reader) {
@Override
protected void onNode(RecordId parentId, RecordId nodeId) {
super.onNode(parentId, nodeId);
- segmentIds.add(nodeId.asUUID());
+ onSegment.accept(nodeId.getSegmentId());
}
@Override
protected void onTemplate(RecordId parentId, RecordId templateId) {
super.onTemplate(parentId, templateId);
- segmentIds.add(templateId.asUUID());
+ onSegment.accept(templateId.getSegmentId());
}
@Override
protected void onMap(RecordId parentId, RecordId mapId, MapRecord map) {
super.onMap(parentId, mapId, map);
- segmentIds.add(mapId.asUUID());
+ onSegment.accept(mapId.getSegmentId());
}
@Override
protected void onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) {
super.onMapDiff(parentId, mapId, map);
- segmentIds.add(mapId.asUUID());
+ onSegment.accept(mapId.getSegmentId());
}
@Override
protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) {
super.onMapLeaf(parentId, mapId, map);
- segmentIds.add(mapId.asUUID());
+ onSegment.accept(mapId.getSegmentId());
}
@Override
protected void onMapBranch(RecordId parentId, RecordId mapId, MapRecord map) {
super.onMapBranch(parentId, mapId, map);
- segmentIds.add(mapId.asUUID());
+ onSegment.accept(mapId.getSegmentId());
}
@Override
protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) {
super.onProperty(parentId, propertyId, template);
- segmentIds.add(propertyId.asUUID());
+ onSegment.accept(propertyId.getSegmentId());
}
@Override
protected void onValue(RecordId parentId, RecordId valueId, Type<?> type) {
super.onValue(parentId, valueId, type);
- segmentIds.add(valueId.asUUID());
+ onSegment.accept(valueId.getSegmentId());
}
@Override
protected void onBlob(RecordId parentId, RecordId blobId) {
super.onBlob(parentId, blobId);
- segmentIds.add(blobId.asUUID());
+ onSegment.accept(blobId.getSegmentId());
}
@Override
protected void onString(RecordId parentId, RecordId stringId) {
super.onString(parentId, stringId);
- segmentIds.add(stringId.asUUID());
+ onSegment.accept(stringId.getSegmentId());
}
@Override
protected void onList(RecordId parentId, RecordId listId, int count) {
super.onList(parentId, listId, count);
- segmentIds.add(listId.asUUID());
+ onSegment.accept(listId.getSegmentId());
}
@Override
protected void onListBucket(RecordId parentId, RecordId listId, int index, int count, int capacity) {
super.onListBucket(parentId, listId, index, count, capacity);
- segmentIds.add(listId.asUUID());
+ onSegment.accept(listId.getSegmentId());
}
- }.parseNode(revisions.getHead());
+ }.parseNode(headId);
}
private static void createNodes(NodeBuilder builder, int count, int depth) {