You are viewing a plain text version of this content. The canonical link for it is here.
Posted to oak-commits@jackrabbit.apache.org by am...@apache.org on 2018/09/13 07:51:53 UTC

svn commit: r1840785 - in /jackrabbit/oak/trunk: oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/ oak-blob-plugins/src/test/java/org/apache/jackrabbi...

Author: amitj
Date: Thu Sep 13 07:51:52 2018
New Revision: 1840785

URL: http://svn.apache.org/viewvc?rev=1840785&view=rev
Log:
OAK-7727: Aggregate references from repositories with same repositoryId

- A distinct references file is created per garbage-collection run, and these files are then aggregated so that all available references are accounted for

Modified:
    jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java
    jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java
    jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCCheckpointRefTest.java
    jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java
    jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java
    jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java

Modified: jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java?rev=1840785&r1=1840784&r2=1840785&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java (original)
+++ jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java Thu Sep 13 07:51:52 2018
@@ -793,8 +793,15 @@ public class MarkSweepGarbageCollector i
             @Override
             void addMarked(GarbageCollectableBlobStore blobStore, GarbageCollectorFileState fs,
                     String repoId) throws DataStoreException, IOException {
-                ((SharedDataStore) blobStore)
-                    .addMetadataRecord(fs.getMarkedRefs(), SharedStoreRecordType.REFERENCES.getNameFromId(repoId));
+                boolean exists = ((SharedDataStore) blobStore)
+                    .metadataRecordExists(SharedStoreRecordType.REFERENCES.getNameFromId(repoId));
+                if (exists) {
+                    LOG.info("References for repository id {} already exists. Creating a duplicate one. "
+                        + "Please check for inadvertent sharing of repository id by different repositories", repoId);
+                }
+
+                ((SharedDataStore) blobStore).addMetadataRecord(fs.getMarkedRefs(), SharedStoreRecordType.REFERENCES
+                    .getNameFromIdPrefix(repoId, String.valueOf(System.currentTimeMillis())));
             }
             
             @Override

Modified: jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java?rev=1840785&r1=1840784&r2=1840785&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java (original)
+++ jackrabbit/oak/trunk/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java Thu Sep 13 07:51:52 2018
@@ -25,7 +25,6 @@ import com.google.common.base.Splitter;
 import com.google.common.collect.FluentIterable;
 import com.google.common.collect.Ordering;
 import com.google.common.collect.Sets;
-
 import org.apache.jackrabbit.core.data.DataRecord;
 import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
@@ -81,7 +80,7 @@ public class SharedDataStoreUtils {
                         return SharedStoreRecordType.REPOSITORY.getIdFromName(input.getIdentifier().toString());
                     }
                 }).keySet(),
-                FluentIterable.from(refs).uniqueIndex(
+                FluentIterable.from(refs).index(
                         new Function<DataRecord, String>() {
                             @Override
                             @Nullable
@@ -111,14 +110,29 @@ public class SharedDataStoreUtils {
         }
 
         public String getIdFromName(String name) {
-            return Splitter.on(DELIIM).limit(2).splitToList(name).get(1);
+            return Splitter.on("_").limit(2).splitToList(
+                Splitter.on(DELIM).limit(2).splitToList(name).get(1)).get(0);
         }
 
         public String getNameFromId(String id) {
-            return Joiner.on(DELIIM).join(getType(), id);
+            return Joiner.on(DELIM).join(getType(), id);
+        }
+
+        /**
+         * Creates name from id and prefix. The format returned is of the form
+         * references-id_prefix.
+         *
+         * @param id
+         * @param prefix
+         * @return
+         */
+        public String getNameFromIdPrefix(String id, String prefix) {
+            return Joiner.on("_").join(
+                Joiner.on(DELIM).join(getType(), id),
+                prefix);
         }
 
-        static final String DELIIM = "-";
+        static final String DELIM = "-";
     }
 }
 

Modified: jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCCheckpointRefTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCCheckpointRefTest.java?rev=1840785&r1=1840784&r2=1840785&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCCheckpointRefTest.java (original)
+++ jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCCheckpointRefTest.java Thu Sep 13 07:51:52 2018
@@ -19,11 +19,8 @@
 package org.apache.jackrabbit.oak.plugins.blob;
 
 import java.util.Date;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
 
 import javax.management.openmbean.TabularData;
 
@@ -31,8 +28,6 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
-import org.apache.jackrabbit.oak.spi.whiteboard.DefaultWhiteboard;
-import org.apache.jackrabbit.oak.spi.whiteboard.Registration;
 import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils;
 import org.apache.jackrabbit.oak.stats.Clock;
 import org.junit.Before;
@@ -44,11 +39,13 @@ import static org.junit.Assert.assertTru
  * Adds BlobGC tests related to retrieving oldest checkpoint reference
  */
 public class BlobGCCheckpointRefTest extends BlobGCTest {
+    protected CheckpointMBean checkpointMBean;
+
     @Override
     @Before
-    public void before() {
+    public void before() throws Exception {
         super.before();
-        checkpointMBean = new MemoryStoreCheckpointMBean(nodeStore, clock);
+        checkpointMBean = new MemoryStoreCheckpointMBean(cluster.nodeStore, clock);
         WhiteboardUtils.registerMBean(wb, CheckpointMBean.class, checkpointMBean,
             CheckpointMBean.TYPE, "Test checkpoint mbean");
     }
@@ -57,19 +54,17 @@ public class BlobGCCheckpointRefTest ext
     public void gcCheckpointHeld() throws Exception {
         log.info("Staring gcCheckpointHeld()");
 
-        BlobStoreState state = setUp(10, 5, 100);
         long afterSetupTime = clock.getTime();
         log.info("afterSetupTime {}", afterSetupTime);
 
         checkpointMBean.createCheckpoint(100);
-        Set<String> afterCheckpointBlobs = createBlobs(2, 100);
-        Set<String> present = Sets.union(state.blobsPresent, afterCheckpointBlobs);
+        Set<String> afterCheckpointBlobs = createBlobs(cluster.blobStore, 2, 100);
+        Set<String> present = Sets.union(cluster.blobStoreState.blobsPresent, afterCheckpointBlobs);
         long maxGcAge = checkpointMBean.getOldestCheckpointCreationTimestamp() - afterSetupTime;
 
-        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
         log.info("{} blobs remaining : {}", present.size(), present);
 
-        Set<String> existingAfterGC = gcInternal(maxGcAge);
+        Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(maxGcAge), false);
         assertTrue(Sets.symmetricDifference(present, existingAfterGC).isEmpty());
     }
 
@@ -77,40 +72,35 @@ public class BlobGCCheckpointRefTest ext
     public void gcCheckpointHeldNoAddition() throws Exception {
         log.info("Staring gcCheckpointHeldNoAddition()");
 
-        BlobStoreState state = setUp(10, 5, 100);
         long afterSetupTime = clock.getTime();
         log.info("afterSetupTime {}", afterSetupTime);
 
         checkpointMBean.createCheckpoint(100);
         long maxGcAge = checkpointMBean.getOldestCheckpointCreationTimestamp() - afterSetupTime;
 
-        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
-        log.info("{} blobs remaining : {}", state.blobsPresent.size(), state.blobsPresent);
-
-        Set<String> existingAfterGC = gcInternal(maxGcAge);
-        assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty());
+        Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(maxGcAge), false);
+        assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent, existingAfterGC).isEmpty());
     }
 
     @Test
     public void gcCheckpointHeldMaxAgeChange() throws Exception {
         log.info("Staring gcCheckpointHeldMaxAgeChange()");
-        startReferenceTime = clock.getTime();
 
-        BlobStoreState state = setUp(10, 5, 100);
         long afterSetupTime = clock.getTime();
         log.info("{} afterSetupTime time", afterSetupTime);
 
         checkpointMBean.createCheckpoint(100);
-        Set<String> afterCheckpointBlobs = createBlobs(2, 100);
-        state.blobsPresent.addAll(afterCheckpointBlobs);
+        Set<String> afterCheckpointBlobs = createBlobs(cluster.blobStore, 2, 100);
+        cluster.blobStoreState.blobsPresent.addAll(afterCheckpointBlobs);
 
-        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
-        log.info("{} blobs remaining : {}", state.blobsPresent.size(), state.blobsPresent);
+        log.info("{} blobs added : {}", cluster.blobStoreState.blobsAdded.size(), cluster.blobStoreState.blobsAdded);
+        log.info("{} blobs remaining : {}", cluster.blobStoreState.blobsPresent.size(), cluster.blobStoreState.blobsPresent);
 
         long maxGcAge = checkpointMBean.getOldestCheckpointCreationTimestamp() - afterSetupTime;
         log.info("Max age configured {}", maxGcAge);
-        Set<String> existingAfterGC = gcInternal(maxGcAge);
-        assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty());
+
+        Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(maxGcAge), false);
+        assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent, existingAfterGC).isEmpty());
     }
 
     /**

Modified: jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java?rev=1840785&r1=1840784&r2=1840785&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java (original)
+++ jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/BlobGCTest.java Thu Sep 13 07:51:52 2018
@@ -20,6 +20,7 @@
 package org.apache.jackrabbit.oak.plugins.blob;
 
 import java.io.ByteArrayInputStream;
+import java.io.Closeable;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -43,16 +44,16 @@ import com.google.common.collect.Iterato
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
+import com.google.common.io.Closer;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.io.output.NullOutputStream;
 import org.apache.jackrabbit.core.data.DataIdentifier;
 import org.apache.jackrabbit.core.data.DataRecord;
 import org.apache.jackrabbit.core.data.DataStoreException;
 import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.CommitFailedException;
 import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser;
 import org.apache.jackrabbit.oak.commons.junit.LogCustomizer;
-import org.apache.jackrabbit.oak.api.CommitFailedException;
-import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils;
 import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob;
 import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
@@ -69,6 +70,7 @@ import org.apache.jackrabbit.oak.spi.whi
 import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard;
 import org.apache.jackrabbit.oak.stats.Clock;
 import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.junit.After;
@@ -80,17 +82,14 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.commons.codec.binary.Hex.encodeHexString;
-import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats
-    .FINISH_FAILURE;
+import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats.FINISH_FAILURE;
 import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats.NAME;
-import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats
-    .NUM_BLOBS_DELETED;
-import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats
-    .NUM_CANDIDATES;
+import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats.NUM_BLOBS_DELETED;
+import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats.NUM_CANDIDATES;
 import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats.START;
-import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats
-    .TOTAL_SIZE_DELETED;
+import static org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.GarbageCollectionOperationStats.TOTAL_SIZE_DELETED;
 import static org.apache.jackrabbit.oak.plugins.blob.OperationsStatsMBean.TYPE;
+import static org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils.randomStream;
 import static org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY;
 import static org.apache.jackrabbit.oak.stats.StatsOptions.METRICS_ONLY;
 import static org.junit.Assert.assertEquals;
@@ -105,23 +104,19 @@ public class BlobGCTest {
     @Rule
     public TemporaryFolder folder = new TemporaryFolder(new File("target"));
 
-    protected GarbageCollectableBlobStore blobStore;
-    protected NodeStore nodeStore;
     protected Whiteboard wb;
-    protected long startReferenceTime;
 
-    protected BlobReferenceRetriever referenceRetriever;
-    protected CheckpointMBean checkpointMBean;
-    protected ScheduledExecutorService scheduledExecutor;
-    protected ThreadPoolExecutor executor;
-    protected DefaultStatisticsProvider statsProvider;
+    protected Closer closer;
+
+    protected Cluster cluster;
+
     protected Clock clock;
 
     @Before
-    public void before() {
+    public void before() throws Exception {
+        closer = Closer.create();
         clock = getClock();
-        blobStore = new TimeLapsedBlobStore();
-        nodeStore = new MemoryBlobStoreNodeStore(blobStore);
+
         // add whiteboard
         final AtomicReference<Map<?, ?>> props = new AtomicReference<Map<?, ?>>();
         wb = new DefaultWhiteboard(){
@@ -131,63 +126,169 @@ public class BlobGCTest {
                 return super.register(type, service, properties);
             }
         };
-        referenceRetriever = ((MemoryBlobStoreNodeStore) nodeStore).getBlobReferenceRetriever();
-        startReferenceTime = ((TimeLapsedBlobStore) blobStore).startTime;
-        scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
-        executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
-        statsProvider = new DefaultStatisticsProvider(scheduledExecutor);
+
+        TimeLapsedBlobStore blobStore = new TimeLapsedBlobStore();
+        MemoryBlobStoreNodeStore nodeStore = new MemoryBlobStoreNodeStore(blobStore);
+        cluster = new Cluster(folder.newFolder(), blobStore, nodeStore, 0);
+        closer.register(cluster);
     }
 
     @After
     public void after() {
-        new ExecutorCloser(scheduledExecutor).close();
-        new ExecutorCloser(executor).close();
+        try {
+            closer.close();
+        } catch (IOException e) {
+            log.error("Error closing cluster instances", e);
+        }
     }
 
     protected Clock getClock() {
         return new Clock.Virtual();
     }
 
+    class Cluster implements Closeable {
+        protected final BlobStoreState blobStoreState;
+        private final File root;
+        String repoId;
+        protected final GarbageCollectableBlobStore blobStore;
+        protected final NodeStore nodeStore;
+        private MarkSweepGarbageCollector collector;
+        protected BlobReferenceRetriever referenceRetriever;
+        protected ScheduledExecutorService scheduledExecutor;
+        protected ThreadPoolExecutor executor;
+        protected DefaultStatisticsProvider statsProvider;
+        protected long startReferenceTime;
+
+        public Cluster(File root, GarbageCollectableBlobStore blobStore, NodeStore nodeStore, int seed) throws Exception {
+            this.root = root;
+            this.nodeStore = nodeStore;
+            this.blobStore = blobStore;
+            if (SharedDataStoreUtils.isShared(blobStore)) {
+                repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
+                ((SharedDataStore) blobStore).addMetadataRecord(
+                    new ByteArrayInputStream(new byte[0]),
+                    REPOSITORY.getNameFromId(repoId));
+            }
+            referenceRetriever = ((MemoryBlobStoreNodeStore) nodeStore).getBlobReferenceRetriever();
+            startReferenceTime = clock.getTime();
+            log.info("Reference time {}", startReferenceTime);
+            scheduledExecutor = Executors.newSingleThreadScheduledExecutor();
+            executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
+            statsProvider = new DefaultStatisticsProvider(scheduledExecutor);
+
+            blobStoreState = setUp(nodeStore, blobStore, 10, 5, 100, seed);
+        }
+
+        public void setRepoId(String id) {
+            this.repoId = id;
+        }
+
+        public MarkSweepGarbageCollector getCollector(long blobGcMaxAgeInSecs) throws Exception {
+            collector =
+                new MarkSweepGarbageCollector(referenceRetriever, blobStore, executor, root.getAbsolutePath(), 2048,
+                    blobGcMaxAgeInSecs, repoId, wb, statsProvider);
+            return collector;
+        }
+
+        @Override public void close() throws IOException {
+            new ExecutorCloser(scheduledExecutor).close();
+            new ExecutorCloser(executor).close();
+        }
+    }
+
     @Test
-    public void gc() throws Exception {
-        log.info("Staring gc()");
+    public void sharedGC() throws Exception {
+        log.info("Staring sharedGC()");
 
-        BlobStoreState state = setUp(10, 5, 100);
+        // Setup a different cluster/repository sharing the blob store
+        MemoryBlobStoreNodeStore secondClusterNodeStore = new MemoryBlobStoreNodeStore(cluster.blobStore);
+        Cluster secondCluster = new Cluster(folder.newFolder(), cluster.blobStore, secondClusterNodeStore, 100);
+        closer.register(secondCluster);
+
+        Sets.SetView<String> totalPresent =
+            Sets.union(cluster.blobStoreState.blobsPresent, secondCluster.blobStoreState.blobsPresent);
+        Sets.SetView<String> totalAdded =
+            Sets.union(cluster.blobStoreState.blobsAdded, secondCluster.blobStoreState.blobsAdded);
+
+        // Execute mark on the default cluster
+        executeGarbageCollection(cluster, cluster.getCollector(0), true);
+        Set<String> existingAfterGC = executeGarbageCollection(secondCluster, secondCluster.getCollector(0), false);
+
+        assertTrue(Sets.symmetricDifference(totalPresent, existingAfterGC).isEmpty());
+        assertStats(secondCluster.statsProvider, 1, 0, totalAdded.size() - totalPresent.size(),
+            totalAdded.size() - totalPresent.size());
+    }
 
-        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
-        log.info("{} blobs remaining : {}", state.blobsPresent.size(), state.blobsPresent);
+    @Test
+    public void noSharedGC() throws Exception {
+        log.info("Staring noSharedGC()");
+
+        // Setup a different cluster/repository sharing the blob store
+        MemoryBlobStoreNodeStore secondClusterNodeStore = new MemoryBlobStoreNodeStore(cluster.blobStore);
+        Cluster secondCluster = new Cluster(folder.newFolder(), cluster.blobStore, secondClusterNodeStore, 100);
+        closer.register(secondCluster);
+
+        Sets.SetView<String> totalAdded =
+            Sets.union(cluster.blobStoreState.blobsAdded, secondCluster.blobStoreState.blobsAdded);
 
-        Set<String> existingAfterGC = gcInternal(0);
-        assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty());
-        assertStats(1, 0, state.blobsAdded.size() - state.blobsPresent.size(),
-            state.blobsAdded.size() - state.blobsPresent.size());
+        Set<String> existingAfterGC = executeGarbageCollection(secondCluster, secondCluster.getCollector(0), false);
+
+        assertEquals(totalAdded, existingAfterGC);
+        assertStats(secondCluster.statsProvider, 1, 0, 0, 0);
+    }
+
+    @Test
+    public void sharedGCRepositoryCloned() throws Exception {
+        log.debug("Running sharedGCRepoCloned()");
+
+        // Setup a different cluster/repository sharing the blob store and the repository id
+        MemoryBlobStoreNodeStore secondClusterNodeStore = new MemoryBlobStoreNodeStore(cluster.blobStore);
+        Cluster secondCluster = new Cluster(folder.newFolder(), cluster.blobStore, secondClusterNodeStore, 100);
+        closer.register(secondCluster);
+
+        ((SharedDataStore) secondCluster.blobStore).deleteMetadataRecord(REPOSITORY.getNameFromId(secondCluster.repoId));
+        secondCluster.setRepoId(cluster.repoId);
+
+        Sets.SetView<String> totalPresent =
+            Sets.union(cluster.blobStoreState.blobsPresent, secondCluster.blobStoreState.blobsPresent);
+
+        // Execute mark on the default cluster
+        executeGarbageCollection(cluster, cluster.getCollector(0), true);
+        Set<String> existingAfterGC = executeGarbageCollection(secondCluster, secondCluster.getCollector(0), false);
+
+        assertTrue(Sets.symmetricDifference(totalPresent, existingAfterGC).isEmpty());
+    }
+
+    @Test
+    public void gc() throws Exception {
+        log.info("Staring gc()");
+
+        Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(0), false);
+        assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent, existingAfterGC).isEmpty());
+        assertStats(cluster.statsProvider, 1, 0,
+            cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size(),
+            cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size());
     }
 
     @Test
     public void noGc() throws Exception {
         log.info("Staring noGc()");
-        startReferenceTime = clock.getTime();
 
-        BlobStoreState state = setUp(10, 5, 100);
         long afterSetupTime = clock.getTime();
+        log.info("after setup time {}", afterSetupTime);
 
-        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
-        log.info("{} blobs remaining : {}", state.blobsPresent.size(), state.blobsPresent);
-
-        Set<String> existingAfterGC = gcInternal(afterSetupTime - startReferenceTime + 2);
-        assertTrue(Sets.symmetricDifference(state.blobsAdded, existingAfterGC).isEmpty());
-        assertStats(1, 0, 0, state.blobsAdded.size() - state.blobsPresent.size());
+        Set<String> existingAfterGC =
+            executeGarbageCollection(cluster, cluster.getCollector(afterSetupTime - cluster.startReferenceTime + 2),
+                false);
+        assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsAdded, existingAfterGC).isEmpty());
+        assertStats(cluster.statsProvider, 1, 0, 0,
+            cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size());
     }
 
     @Test
     public void gcCheckDeletedSize() throws Exception {
         log.info("Staring gcCheckDeletedSize()");
 
-        BlobStoreState state = setUp(10, 5, 100);
-
-        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
-        log.info("{} blobs remaining : {}", state.blobsPresent.size(), state.blobsPresent);
-
         // Capture logs for the second round of gc
         LogCustomizer customLogs = LogCustomizer
             .forLogger(MarkSweepGarbageCollector.class.getName())
@@ -197,82 +298,54 @@ public class BlobGCTest {
             .create();
         customLogs.starting();
 
-        Set<String> existingAfterGC = gcInternal(0);
+        Set<String> existingAfterGC =
+            executeGarbageCollection(cluster, cluster.getCollector(0),false);
         assertEquals(1, customLogs.getLogs().size());
-        long deletedSize = (state.blobsAdded.size() - state.blobsPresent.size()) * 100;
+        long deletedSize = (cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size()) * 100;
         assertTrue(customLogs.getLogs().get(0).contains(String.valueOf(deletedSize)));
-        assertStats(1, 0, state.blobsAdded.size() - state.blobsPresent.size(),
-            state.blobsAdded.size() - state.blobsPresent.size());
-        assertEquals(deletedSize, getStatCount(TOTAL_SIZE_DELETED));
+        assertStats(cluster.statsProvider, 1, 0,
+            cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size(),
+            cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size());
+        assertEquals(deletedSize, getStatCount(cluster.statsProvider, TOTAL_SIZE_DELETED));
 
         customLogs.finished();
-        assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty());
+        assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent, existingAfterGC).isEmpty());
     }
 
     @Test
     public void gcMarkOnly() throws Exception {
         log.info("Staring gcMarkOnly()");
 
-        BlobStoreState state = setUp(10, 5, 100);
-
-        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
-        log.info("{} blobs remaining : {}", state.blobsPresent.size(), state.blobsPresent);
-
-        Set<String> existingAfterGC = gcInternal(0, true);
-        assertTrue(Sets.symmetricDifference(state.blobsAdded, existingAfterGC).isEmpty());
-        assertStats(1, 0, 0, 0);
-    }
-
-    protected Set<String> gcInternal(long maxBlobGcInSecs) throws Exception {
-        return gcInternal(maxBlobGcInSecs, false);
+        Set<String> existingAfterGC =
+            executeGarbageCollection(cluster, cluster.getCollector(0),true);
+        assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsAdded, existingAfterGC).isEmpty());
+        assertStats(cluster.statsProvider, 1, 0, 0, 0);
     }
 
-    protected Set<String> gcInternal(long maxBlobGcInSecs, boolean markOnly) throws Exception {
-        MarkSweepGarbageCollector gc = initGC(maxBlobGcInSecs, executor);
-        gc.collectGarbage(markOnly);
+    protected Set<String> executeGarbageCollection(Cluster cluster, MarkSweepGarbageCollector collector, boolean markOnly)
+        throws Exception {
+        collector.collectGarbage(markOnly);
 
-        assertEquals(0, executor.getTaskCount());
-        Set<String> existingAfterGC = iterate();
+        assertEquals(0, cluster.executor.getTaskCount());
+        Set<String> existingAfterGC = iterate(cluster.blobStore);
         log.info("{} blobs existing after gc : {}", existingAfterGC.size(), existingAfterGC);
 
         return existingAfterGC;
     }
 
-    private void assertStats(int start, int failure, long deleted, long candidates) {
-        assertEquals("Start counter mismatch", start, getStatCount(START));
-        assertEquals("Finish error mismatch", failure, getStatCount(FINISH_FAILURE));
-        assertEquals("Num deleted mismatch", deleted, getStatCount(NUM_BLOBS_DELETED));
-        assertEquals("Num candidates mismatch", candidates, getStatCount(NUM_CANDIDATES));
+    private void assertStats(StatisticsProvider statsProvider, int start, int failure, long deleted, long candidates) {
+        assertEquals("Start counter mismatch", start, getStatCount(statsProvider, START));
+        assertEquals("Finish error mismatch", failure, getStatCount(statsProvider, FINISH_FAILURE));
+        assertEquals("Num deleted mismatch", deleted, getStatCount(statsProvider, NUM_BLOBS_DELETED));
+        assertEquals("Num candidates mismatch", candidates, getStatCount(statsProvider, NUM_CANDIDATES));
     }
 
-    private long getStatCount(String name) {
+    private long getStatCount(StatisticsProvider statsProvider, String name) {
         return statsProvider.getCounterStats(
             TYPE + "." + NAME + "." + name, METRICS_ONLY).getCount();
     }
 
-    private MarkSweepGarbageCollector initGC(long blobGcMaxAgeInSecs, ThreadPoolExecutor executor)
-        throws Exception {
-        return initGC(blobGcMaxAgeInSecs, executor, folder.newFolder().getAbsolutePath());
-    }
-
-    private MarkSweepGarbageCollector initGC(long blobGcMaxAgeInSecs, ThreadPoolExecutor executor,
-        String root) throws Exception {
-        String repoId = null;
-        if (SharedDataStoreUtils.isShared(blobStore)) {
-            repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore);
-            ((SharedDataStore) blobStore).addMetadataRecord(
-                new ByteArrayInputStream(new byte[0]),
-                REPOSITORY.getNameFromId(repoId));
-        }
-
-        MarkSweepGarbageCollector gc =
-            new MarkSweepGarbageCollector(referenceRetriever,
-                blobStore, executor,
-                root, 2048, blobGcMaxAgeInSecs, repoId, wb, statsProvider);
-        return gc;
-    }
-
-    protected Set<String> iterate() throws Exception {
+    protected Set<String> iterate(GarbageCollectableBlobStore blobStore) throws Exception {
         Iterator<String> cur = blobStore.getAllChunkIds(0);
 
         Set<String> existing = Sets.newHashSet();
@@ -282,10 +355,12 @@ public class BlobGCTest {
         return existing;
     }
 
-    public BlobStoreState setUp(
+    public BlobStoreState setUp (NodeStore nodeStore,
+        GarbageCollectableBlobStore blobStore,
         int count,
         int deletions,
-        int blobSize) throws Exception {
+        int blobSize,
+        int seed) throws Exception {
 
         preSetup();
 
@@ -304,7 +379,8 @@ public class BlobGCTest {
 
         BlobStoreState state = new BlobStoreState();
         for (int i = 0; i < numBlobs; i++) {
-            Blob b = nodeStore.createBlob(randomStream(i, blobSize));
+            Blob b = nodeStore.createBlob(
+                randomStream(Integer.parseInt(String.valueOf(seed) + String.valueOf(i)), blobSize));
             Iterator<String> idIter = blobStore.resolveChunks(b.getContentIdentity());
             while (idIter.hasNext()) {
                 String chunk = idIter.next();
@@ -327,15 +403,18 @@ public class BlobGCTest {
         // Sleep a little to make eligible for cleanup
         clock.waitUntil(5);
 
-        postSetup(state);
+        postSetup(nodeStore, state);
+
+        log.info("{} blobs added : {}", state.blobsAdded.size(), state.blobsAdded);
+        log.info("{} blobs remaining : {}", state.blobsPresent.size(), state.blobsPresent);
 
         return state;
     }
 
-    protected Set<String> createBlobs(int count, int size) throws Exception {
+    protected Set<String> createBlobs(GarbageCollectableBlobStore blobStore, int count, int size) throws Exception {
         HashSet<String> blobSet = new HashSet<String>();
         for  (int i = 0; i < count; i++) {
-            String id = blobStore.writeBlob(randomStream(100 + i, size));
+            String id = blobStore.writeBlob(randomStream(10 + i, size));
             Iterator<String> idIter = blobStore.resolveChunks(id);
             while (idIter.hasNext()) {
                 String chunk = idIter.next();
@@ -346,9 +425,9 @@ public class BlobGCTest {
         return blobSet;
     }
 
-    protected void preSetup() {}
+    void preSetup() {}
 
-    protected void postSetup(BlobStoreState state) {
+    protected void postSetup(NodeStore nodeStore, BlobStoreState state) {
         ((MemoryBlobStoreNodeStore) nodeStore).setReferencedBlobs(state.blobsPresent);
     }
 
@@ -359,13 +438,6 @@ public class BlobGCTest {
         nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
     }
 
-    static InputStream randomStream(int seed, int size) {
-        Random r = new Random(seed);
-        byte[] data = new byte[size];
-        r.nextBytes(data);
-        return new ByteArrayInputStream(data);
-    }
-
     /**
      * Represents state of the blobs after setup
      */

Modified: jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java?rev=1840785&r1=1840784&r2=1840785&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java (original)
+++ jackrabbit/oak/trunk/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java Thu Sep 13 07:51:52 2018
@@ -18,15 +18,12 @@
  */
 package org.apache.jackrabbit.oak.plugins.blob;
 
-import static com.google.common.collect.Sets.newHashSet;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-
 import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
@@ -35,26 +32,31 @@ import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
-import junit.framework.Assert;
-
 import org.apache.commons.io.IOUtils;
 import org.apache.jackrabbit.core.data.DataIdentifier;
 import org.apache.jackrabbit.core.data.DataRecord;
 import org.apache.jackrabbit.core.data.DataStoreException;
 import org.apache.jackrabbit.oak.commons.FileIOUtils;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils.SharedStoreRecordType;
-import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils;
 import org.jetbrains.annotations.Nullable;
+import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static com.google.common.collect.Sets.newHashSet;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Test for SharedDataUtils to test addition, retrieval and deletion of root records.
  */
@@ -84,7 +86,6 @@ public class SharedDataStoreUtilsTest {
         dataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]),
             SharedStoreRecordType.REPOSITORY.getNameFromId(repoId2));
         DataRecord repo2 = dataStore.getMetadataRecord(SharedStoreRecordType.REPOSITORY.getNameFromId(repoId2));
-        
         // Add reference marker record for repo1
         dataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]),
                                        SharedStoreRecordType.MARKED_START_MARKER.getNameFromId(repoId1));
@@ -161,16 +162,10 @@ public class SharedDataStoreUtilsTest {
 
         dataStore.addMetadataRecord(new FileInputStream(f),
             SharedStoreRecordType.REFERENCES.getNameFromId(repoId));
+        assertTrue(dataStore.metadataRecordExists(SharedStoreRecordType.REFERENCES.getNameFromId(repoId)));
         DataRecord rec = dataStore.getMetadataRecord(SharedStoreRecordType.REFERENCES.getNameFromId(repoId));
         Set<String> refsReturned = FileIOUtils.readStringsAsSet(rec.getStream(), false);
-        Assert.assertEquals(refs, refsReturned);
-        dataStore.deleteAllMetadataRecords(SharedStoreRecordType.REFERENCES.getType());
-
-        dataStore.addMetadataRecord(f,
-            SharedStoreRecordType.REFERENCES.getNameFromId(repoId));
-        rec = dataStore.getMetadataRecord(SharedStoreRecordType.REFERENCES.getNameFromId(repoId));
-        refsReturned = FileIOUtils.readStringsAsSet(rec.getStream(), false);
-        Assert.assertEquals(refs, refsReturned);
+        assertEquals(refs, refsReturned);
         assertEquals(
             SharedStoreRecordType.REFERENCES.getIdFromName(rec.getIdentifier().toString()),
             repoId);
@@ -178,6 +173,151 @@ public class SharedDataStoreUtilsTest {
     }
 
     @Test
+    public void testAddMetadataWithExtraSuffix() throws Exception {
+        addMultipleMetadata(true);
+    }
+
+    @Test
+    public void testAddMetadataWithConditionalExtraSuffix() throws Exception {
+        addMultipleMetadata(false);
+    }
+
+    @Test
+    public void testRefsAvailableAllRepos() throws Exception {
+        File rootFolder = folder.newFolder();
+        dataStore = getBlobStore(rootFolder);
+
+        loadData(true);
+
+        // Check that all references from registered repositories are available
+        Assert.assertTrue(SharedDataStoreUtils
+            .refsNotAvailableFromRepos(dataStore.getAllMetadataRecords(SharedStoreRecordType.REPOSITORY.getType()),
+                dataStore.getAllMetadataRecords(SharedStoreRecordType.REFERENCES.getType())).isEmpty());
+    }
+
+    @Test
+    public void testRefsNotAvailableAllRepos() throws Exception {
+        File rootFolder = folder.newFolder();
+        dataStore = getBlobStore(rootFolder);
+
+        Data data = loadData(true);
+        // Delete one of the references files
+        String expectedMissingRepoId = data.repoIds.get(data.repoIds.size() - 1);
+        dataStore.deleteAllMetadataRecords(
+            SharedStoreRecordType.REFERENCES.getNameFromId(expectedMissingRepoId));
+
+        // Check that the repository whose references file was deleted is reported as missing
+        Set<String> missingRepoIds = SharedDataStoreUtils
+            .refsNotAvailableFromRepos(dataStore.getAllMetadataRecords(SharedStoreRecordType.REPOSITORY.getType()),
+                dataStore.getAllMetadataRecords(SharedStoreRecordType.REFERENCES.getType()));
+        assertEquals(Sets.newHashSet(expectedMissingRepoId), missingRepoIds);
+    }
+
+    private void addMultipleMetadata(boolean extended) throws Exception {
+        File rootFolder = folder.newFolder();
+        dataStore = getBlobStore(rootFolder);
+
+        Data data = loadData(extended);
+
+        // Retrieve all records
+        List<DataRecord> recs =
+            dataStore.getAllMetadataRecords(SharedStoreRecordType.REFERENCES.getType());
+
+        Set<String> returnedRefs = Sets.newHashSet();
+        for (DataRecord retRec : recs) {
+            assertTrue(data.repoIds.contains(SharedStoreRecordType.REFERENCES.getIdFromName(retRec.getIdentifier().toString())));
+            returnedRefs.addAll(FileIOUtils.readStringsAsSet(retRec.getStream(), false));
+        }
+        assertEquals(data.refs, returnedRefs);
+
+        // Delete all references records
+        dataStore.deleteAllMetadataRecords(SharedStoreRecordType.REFERENCES.getType());
+        for (int i = 0; i < data.repoIds.size(); i++) {
+            assertFalse(
+                dataStore.metadataRecordExists(getName(extended, data.repoIds.get(i), data.suffixes.get(i + 1))));
+
+            if (i == 0) {
+                assertFalse(
+                    dataStore.metadataRecordExists(getName(extended, data.repoIds.get(i), data.suffixes.get(i))));
+            }
+        }
+    }
+
+    private Data loadData(boolean extended) throws Exception {
+        Data data = new Data();
+        String repoId1 = UUID.randomUUID().toString();
+        data.repoIds.add(repoId1);
+        dataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]),
+            SharedStoreRecordType.REPOSITORY.getNameFromId(repoId1));
+
+        Set<String> refs = Sets.newHashSet("1_1", "1_2");
+        data.refs.addAll(refs);
+        File f = folder.newFile();
+        FileIOUtils.writeStrings(refs.iterator(), f, false);
+
+        Set<String> refs2 = Sets.newHashSet("2_1", "2_2");
+        data.refs.addAll(refs2);
+        File f2 = folder.newFile();
+        FileIOUtils.writeStrings(refs2.iterator(), f2, false);
+
+        String repoId2 = UUID.randomUUID().toString();
+        data.repoIds.add(repoId2);
+        dataStore.addMetadataRecord(new ByteArrayInputStream(new byte[0]),
+            SharedStoreRecordType.REPOSITORY.getNameFromId(repoId2));
+
+        Set<String> refs3 = Sets.newHashSet("3_1", "3_2");
+        data.refs.addAll(refs3);
+        File f3 = folder.newFile();
+        FileIOUtils.writeStrings(refs3.iterator(), f3, false);
+
+        String suffix1 = String.valueOf(System.currentTimeMillis());
+        data.suffixes.add(suffix1);
+        dataStore.addMetadataRecord(new FileInputStream(f), getName(extended, repoId1, suffix1));
+
+        // Check for the presence of the record just added
+        assertTrue(dataStore.metadataRecordExists(getName(extended, repoId1, suffix1)));
+
+        DataRecord rec = dataStore.getMetadataRecord(getName(extended, repoId1, suffix1));
+        assertEquals(refs, FileIOUtils.readStringsAsSet(rec.getStream(), false));
+
+        // Add a duplicate
+        TimeUnit.MILLISECONDS.sleep(100);
+        String suffix2 = String.valueOf(System.currentTimeMillis());
+        data.suffixes.add(suffix2);
+        dataStore.addMetadataRecord(f2, getName(true, repoId1, suffix2));
+
+        // Check presence of the duplicate record
+        assertTrue(
+            dataStore.metadataRecordExists(getName(extended, repoId1, suffix2)));
+
+        // Add a separate record
+        TimeUnit.MILLISECONDS.sleep(100);
+        String suffix3 = String.valueOf(System.currentTimeMillis());
+        data.suffixes.add(suffix3);
+        dataStore.addMetadataRecord(f3, getName(extended, repoId2, suffix3));
+
+        // Check presence of the separate record
+        assertTrue(
+            dataStore.metadataRecordExists(getName(extended, repoId2, suffix3)));
+
+        return data;
+    }
+
+    class Data {
+        List<String> suffixes = Lists.newArrayList();
+        List<String> repoIds = Lists.newArrayList();
+        Set<String> refs = Sets.newHashSet();
+    }
+
+    private static String getName(boolean extended, String repoId, String suffix) {
+        if (!extended) {
+            return SharedStoreRecordType.REFERENCES.getNameFromId(repoId);
+        } else {
+            return SharedStoreRecordType.REFERENCES.getNameFromIdPrefix(repoId, suffix);
+        }
+    }
+
+    @Test
     public void testGetAllChunkIds() throws Exception {
         File rootFolder = folder.newFolder();
         dataStore = getBlobStore(rootFolder);

Modified: jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java?rev=1840785&r1=1840784&r2=1840785&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java (original)
+++ jackrabbit/oak/trunk/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java Thu Sep 13 07:51:52 2018
@@ -22,7 +22,6 @@ package org.apache.jackrabbit.oak.plugin
 import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.Date;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -34,7 +33,6 @@ import java.util.concurrent.TimeUnit;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.MoreExecutors;
-
 import org.apache.jackrabbit.core.data.DataStore;
 import org.apache.jackrabbit.oak.api.Blob;
 import org.apache.jackrabbit.oak.plugins.blob.BlobGarbageCollector;
@@ -42,13 +40,13 @@ import org.apache.jackrabbit.oak.plugins
 import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
 import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils.SharedStoreRecordType;
 import org.apache.jackrabbit.oak.plugins.document.VersionGarbageCollector.VersionGCStats;
-import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils;
 import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
-import org.apache.jackrabbit.oak.spi.cluster.ClusterRepositoryInfo;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
+import org.apache.jackrabbit.oak.spi.cluster.ClusterRepositoryInfo;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
@@ -61,6 +59,7 @@ import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils.randomStream;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -77,6 +76,7 @@ public class SharedBlobStoreGCTest {
     protected Cluster cluster2;
     private Clock clock;
     protected boolean retryCreation = false;
+    private File rootFolder;
 
     @Before
     public void setUp() throws Exception {
@@ -86,7 +86,7 @@ public class SharedBlobStoreGCTest {
         clock.waitUntil(Revision.getCurrentTimestamp());
         DataStoreUtils.time = clock.getTime();
 
-        File rootFolder = folder.newFolder();
+        rootFolder = folder.newFolder();
         BlobStore blobeStore1 = getBlobStore(rootFolder);
         DocumentNodeStore ds1 = new DocumentMK.Builder()
                 .setAsyncDelay(0)
@@ -120,13 +120,6 @@ public class SharedBlobStoreGCTest {
         log.debug("Initialized {}", cluster2);
     }
 
-    static InputStream randomStream(int seed, int size) {
-        Random r = new Random(seed);
-        byte[] data = new byte[size];
-        r.nextBytes(data);
-        return new ByteArrayInputStream(data);
-    }
-
     @Test
     public void testGC() throws Exception {
         log.debug("Running testGC()");
@@ -225,6 +218,38 @@ public class SharedBlobStoreGCTest {
             cluster1.getExistingBlobIds()).isEmpty());
     }
 
+    @Test
+    public void testMarkOnCloned() throws Exception {
+        log.debug("Running testMarkOnCloned()");
+
+        BlobStore blobeStore3 = getBlobStore(rootFolder);
+        DocumentNodeStore ds3 = new DocumentMK.Builder()
+            .setAsyncDelay(0)
+            .setDocumentStore(new MemoryDocumentStore())
+            .setBlobStore(blobeStore3)
+            .clock(clock)
+            .getNodeStore();
+        NodeBuilder a = ds3.getRoot().builder();
+        a.child(":clusterConfig").setProperty(":clusterId", cluster2.repoId);
+
+        Cluster cluster3 = new Cluster(ds3, cluster2.repoId, 120);
+        cluster3.init();
+        log.debug("Initialized {}", cluster3);
+
+        // run the mark phase on other repositories
+        cluster1.gc.collectGarbage(true);
+        cluster2.gc.collectGarbage(true);
+
+        // Execute the gc with sweep
+        cluster3.gc.collectGarbage(false);
+
+        Set<String> existing = cluster1.getExistingBlobIds();
+        log.debug("Existing blobs {}", existing);
+        assertTrue(existing.containsAll(cluster2.getInitBlobs()));
+        assertTrue(existing.containsAll(cluster1.getInitBlobs()));
+        assertTrue(existing.containsAll(cluster3.getInitBlobs()));
+    }
+
     @After
     public void tearDown() throws Exception {
         DataStoreUtils.time = -1;