Posted to oak-commits@jackrabbit.apache.org by md...@apache.org on 2016/08/03 16:20:09 UTC

svn commit: r1755096 - in /jackrabbit/oak/trunk/oak-segment-tar/src: main/java/org/apache/jackrabbit/oak/segment/file/ test/java/org/apache/jackrabbit/oak/segment/

Author: mduerig
Date: Wed Aug  3 16:20:09 2016
New Revision: 1755096

URL: http://svn.apache.org/viewvc?rev=1755096&view=rev
Log:
OAK-4579: Improve FileStore.size calculation
Calculate the size of the repository from a snapshot of the TarReaders and the TarWriter size.
Credits to Andrei Dulceanu for the patch
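
The change replaces a continuously maintained AtomicLong estimate with an
on-demand computation: take a snapshot of the TarReaders and the current
write file length under the read lock, then sum the sizes after releasing
it, so the critical section stays short. A minimal standalone sketch of
that snapshot-then-sum pattern, with simplified class and field names (not
the actual FileStore code, which follows in the diff below):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class SnapshotSizeSketch {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        private final List<Long> readerSizes = new ArrayList<Long>(); // stand-in for the TarReaders
        private long writeFileLength;                                 // stand-in for the TarWriter file

        long size() {
            List<Long> snapshot;
            long writeLength;
            lock.readLock().lock();
            try {
                // Only the cheap copy happens while holding the lock ...
                snapshot = new ArrayList<Long>(readerSizes);
                writeLength = writeFileLength;
            } finally {
                lock.readLock().unlock();
            }
            // ... the summation runs without blocking concurrent writers.
            long size = writeLength;
            for (long readerSize : snapshot) {
                size += readerSize;
            }
            return size;
        }
    }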

Modified:
    jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
    jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
    jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java
    jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java
    jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java

Modified: jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java?rev=1755096&r1=1755095&r2=1755096&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java (original)
+++ jackrabbit/oak/trunk/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java Wed Aug  3 16:20:09 2016
@@ -75,6 +75,7 @@ import com.google.common.base.Function;
 import com.google.common.base.Predicate;
 import com.google.common.base.Stopwatch;
 import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableList;
 import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
 import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean;
 import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector;
@@ -201,11 +202,6 @@ public class FileStore implements Segmen
     private final GCListener gcListener;
 
     /**
-     * Represents the approximate size on disk of the repository.
-     */
-    private final AtomicLong approximateSize;
-
-    /**
      * This flag is updated by calling the {@code SegmentGCOptions}
      * at regular intervals.
      */
@@ -291,10 +287,7 @@ public class FileStore implements Segmen
                         memoryMapping, recover));
             }
         }
-
-        long initialSize = size();
-        this.approximateSize = new AtomicLong(initialSize);
-        this.stats = new FileStoreStats(builder.getStatsProvider(), this, initialSize);
+        this.stats = new FileStoreStats(builder.getStatsProvider(), this, size());
 
         if (!readOnly) {
             if (indices.length > 0) {
@@ -607,17 +600,28 @@ public class FileStore implements Segmen
         return dataFiles;
     }
 
-    public final long size() {
+    /**
+     * @return the size of this store. This method shouldn't be called from
+     * a very tight loop as it contends with the {@link #fileStoreLock}.
+     */
+    private long size() {
+        List<TarReader> readersSnapshot = null;
+        long writeFileSnapshotSize = 0;
+
         fileStoreLock.readLock().lock();
         try {
-            long size = writeFile != null ? writeFile.length() : 0;
-            for (TarReader reader : readers) {
-                size += reader.size();
-            }
-            return size;
+            readersSnapshot = ImmutableList.copyOf(readers);
+            writeFileSnapshotSize = writeFile != null ? writeFile.length() : 0;
         } finally {
             fileStoreLock.readLock().unlock();
         }
+
+        long size = writeFileSnapshotSize;
+        for (TarReader reader : readersSnapshot) {
+            size += reader.size();
+        }
+
+        return size;
     }
 
     public int readerCount(){
@@ -820,7 +824,6 @@ public class FileStore implements Segmen
         }
 
         long finalSize = size();
-        approximateSize.set(finalSize);
         stats.reclaimed(initialSize - finalSize);
         // FIXME OAK-4106: Reclaimed size reported by FileStore.cleanup is off
         gcListener.cleaned(initialSize - finalSize, finalSize);
@@ -1315,7 +1318,6 @@ public class FileStore implements Segmen
             if (size >= maxFileSize) {
                 newWriter();
             }
-            approximateSize.addAndGet(TarWriter.BLOCK_SIZE + length + TarWriter.getPaddingSize(length));
         } finally {
             fileStoreLock.writeLock().unlock();
         }
@@ -1416,7 +1418,7 @@ public class FileStore implements Segmen
     }
 
     private void checkDiskSpace() {
-        long repositoryDiskSpace = approximateSize.get();
+        long repositoryDiskSpace = size();
         long availableDiskSpace = directory.getFreeSpace();
         boolean updated = gcOptions.isDiskSpaceSufficient(repositoryDiskSpace, availableDiskSpace);
         boolean previous = sufficientDiskSpace.getAndSet(updated);
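
With the AtomicLong gone, checkDiskSpace derives the repository footprint
from size() on demand and compares it with the free space reported for the
store directory. The actual policy lives in SegmentGCOptions.isDiskSpaceSufficient;
the sketch below substitutes a hypothetical 25% free-space threshold purely
to illustrate the shape of the check:

    import java.io.File;

    class DiskSpaceCheckSketch {

        // Hypothetical policy, NOT Oak's: require free space of at least a
        // quarter of the repository's current on-disk size.
        static boolean isDiskSpaceSufficient(long repositoryDiskSpace, long availableDiskSpace) {
            return availableDiskSpace > repositoryDiskSpace / 4;
        }

        public static void main(String[] args) {
            File directory = new File(".");
            long availableDiskSpace = directory.getFreeSpace();
            System.out.println("sufficient: "
                    + isDiskSpaceSufficient(1024L * 1024 * 1024, availableDiskSpace));
        }
    }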

Modified: jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java?rev=1755096&r1=1755095&r2=1755096&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java (original)
+++ jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactionAndCleanupIT.java Wed Aug  3 16:20:09 2016
@@ -46,7 +46,9 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
 import java.util.concurrent.FutureTask;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -63,6 +65,7 @@ import org.apache.jackrabbit.oak.spi.sta
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -88,8 +91,10 @@ public class CompactionAndCleanupIT {
     @Test
     public void compactionNoBinaryClone()
     throws IOException, CommitFailedException {
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
                 .withGCOptions(defaultGCOptions().setRetainedGenerations(2))
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .withMaxFileSize(1)
                 .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
@@ -110,7 +115,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             log.debug("File store size {}", byteCountToDisplaySize(size1));
 
             // Create a property with 5 MB blob
@@ -119,7 +124,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob added", size2, size1 + blobSize, size1 + blobSize + (blobSize / 100));
 
             // Now remove the property. No gc yet -> size doesn't shrink
@@ -128,14 +133,14 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size3 = fileStore.size();
+            long size3 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob removed", size3, size2, size2 + 4096);
 
             // 1st gc cycle -> no reclaimable garbage...
             fileStore.compact();
             fileStore.cleanup();
 
-            long size4 = fileStore.size();
+            long size4 = fileStore.getStats().getApproximateSize();
             assertSize("1st gc", size4, size3, size3 + size1);
 
             // Add another 5MB binary doubling the blob size
@@ -144,21 +149,21 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size5 = fileStore.size();
+            long size5 = fileStore.getStats().getApproximateSize();
             assertSize("2nd blob added", size5, size4 + blobSize, size4 + blobSize + (blobSize / 100));
 
             // 2nd gc cycle -> 1st blob should get collected
             fileStore.compact();
             fileStore.cleanup();
 
-            long size6 = fileStore.size();
+            long size6 = fileStore.getStats().getApproximateSize();
             assertSize("2nd gc", size6, size5 - blobSize - size1, size5 - blobSize);
 
             // 3rd gc cycle -> no significant change
             fileStore.compact();
             fileStore.cleanup();
 
-            long size7 = fileStore.size();
+            long size7 = fileStore.getStats().getApproximateSize();
             assertSize("3rd gc", size7, size6 * 10/11 , size6 * 10/9);
 
             // No data loss
@@ -174,9 +179,11 @@ public class CompactionAndCleanupIT {
     public void offlineCompaction()
     throws IOException, CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
                 .withMaxFileSize(1)
                 .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
 
@@ -196,7 +203,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             log.debug("File store size {}", byteCountToDisplaySize(size1));
 
             // Create a property with 5 MB blob
@@ -205,7 +212,7 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob added", size2, size1 + blobSize, size1 + blobSize + (blobSize / 100));
 
             // Now remove the property. No gc yet -> size doesn't shrink
@@ -214,14 +221,14 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size3 = fileStore.size();
+            long size3 = fileStore.getStats().getApproximateSize();
             assertSize("1st blob removed", size3, size2, size2 + 4096);
 
             // 1st gc cycle -> 1st blob should get collected
             fileStore.compact();
             fileStore.cleanup();
 
-            long size4 = fileStore.size();
+            long size4 = fileStore.getStats().getApproximateSize();
             assertSize("1st gc", size4, size3 - blobSize - size1, size3
                     - blobSize);
 
@@ -231,21 +238,21 @@ public class CompactionAndCleanupIT {
             nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
             fileStore.flush();
 
-            long size5 = fileStore.size();
+            long size5 = fileStore.getStats().getApproximateSize();
             assertSize("2nd blob added", size5, size4 + blobSize, size4 + blobSize + (blobSize / 100));
 
             // 2nd gc cycle -> 2nd blob should *not* be collected
             fileStore.compact();
             fileStore.cleanup();
 
-            long size6 = fileStore.size();
+            long size6 = fileStore.getStats().getApproximateSize();
             assertSize("2nd gc", size6, size5 * 10/11, size5 * 10/9);
 
             // 3rd gc cycle -> no significant change
             fileStore.compact();
             fileStore.cleanup();
 
-            long size7 = fileStore.size();
+            long size7 = fileStore.getStats().getApproximateSize();
             assertSize("3rd gc", size7, size6 * 10/11 , size6 * 10/9);
 
             // No data loss
@@ -265,9 +272,11 @@ public class CompactionAndCleanupIT {
     public void offlineCompactionCps() throws IOException,
             CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
                 .withMaxFileSize(1)
                 .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
         try {
@@ -285,7 +294,7 @@ public class CompactionAndCleanupIT {
             fileStore.compact();
             fileStore.cleanup();
             // Compacts to 548Kb
-            long size0 = fileStore.size();
+            long size0 = fileStore.getStats().getApproximateSize();
 
             int cpNo = 4;
             Set<String> cps = new HashSet<String>();
@@ -297,11 +306,11 @@ public class CompactionAndCleanupIT {
                 assertTrue(nodeStore.retrieve(cp) != null);
             }
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             assertSize("with checkpoints added", size1, size0, size0 * 11 / 10);
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("with checkpoints compacted", size2, size1 * 9/10, size1 * 11 / 10);
         } finally {
             fileStore.close();
@@ -317,8 +326,12 @@ public class CompactionAndCleanupIT {
             CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline()
                 .withBinaryDeduplication();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
-                .withMaxFileSize(1).withGCOptions(gcOptions).build();
+                .withMaxFileSize(1)
+                .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders
                 .builder(fileStore).build();
 
@@ -349,10 +362,10 @@ public class CompactionAndCleanupIT {
                 assertTrue(nodeStore.retrieve(cp) != null);
             }
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("with compacted binaries", size2, 0, size1 - blobSize);
         } finally {
             fileStore.close();
@@ -372,8 +385,12 @@ public class CompactionAndCleanupIT {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline()
                 .withBinaryDeduplication()
                 .setBinaryDeduplicationMaxSize(blobSize / 2);
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
-                .withMaxFileSize(1).withGCOptions(gcOptions).build();
+                .withMaxFileSize(1)
+                .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders
                 .builder(fileStore).build();
 
@@ -403,10 +420,10 @@ public class CompactionAndCleanupIT {
                 assertTrue(nodeStore.retrieve(cp) != null);
             }
 
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
 
             // not expected to reduce the size too much, as the binaries are
             // above the threshold
@@ -425,8 +442,12 @@ public class CompactionAndCleanupIT {
     public void offlineCompactionBinR1() throws IOException,
             CommitFailedException {
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         FileStore fileStore = fileStoreBuilder(getFileStoreFolder())
-                .withMaxFileSize(1).withGCOptions(gcOptions).build();
+                .withMaxFileSize(1)
+                .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                .build();
         SegmentNodeStore nodeStore = SegmentNodeStoreBuilders
                 .builder(fileStore).build();
 
@@ -457,10 +478,10 @@ public class CompactionAndCleanupIT {
             }
 
             // 5Mb, de-duplication by the SegmentWriter
-            long size1 = fileStore.size();
+            long size1 = fileStore.getStats().getApproximateSize();
             fileStore.compact();
             fileStore.cleanup();
-            long size2 = fileStore.size();
+            long size2 = fileStore.getStats().getApproximateSize();
             assertSize("with compacted binaries", size2, 0, size1 * 11 / 10);
         } finally {
             fileStore.close();
@@ -723,8 +744,8 @@ public class CompactionAndCleanupIT {
             try {
                 // The 1M blob should get gc-ed
                 fileStore.cleanup();
-                assertTrue(ref + " repository size " + fileStore.size() + " < " + 1024 * 1024,
-                        fileStore.size() < 1024 * 1024);
+                assertTrue(ref + " repository size " + fileStore.getStats().getApproximateSize() + " < " + 1024 * 1024,
+                        fileStore.getStats().getApproximateSize() < 1024 * 1024);
             } finally {
                 fileStore.close();
             }
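
Because size() is now private, the tests read the size through FileStoreStats,
which in turn requires wiring a statistics provider into the builder. A minimal
usage sketch of that setup, assuming the static fileStoreBuilder factory on
org.apache.jackrabbit.oak.segment.file.FileStoreBuilder; the store path is a
placeholder, and the executor shutdown is an addition the tests above omit:

    import java.io.File;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;

    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;

    import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;

    class ApproximateSizeSketch {
        public static void main(String[] args) throws Exception {
            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
            FileStore fileStore = fileStoreBuilder(new File("segmentstore"))
                    .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                    .build();
            try {
                // getApproximateSize() reads the size tracked by FileStoreStats.
                long approximateSize = fileStore.getStats().getApproximateSize();
                System.out.println("approximate size: " + approximateSize);
            } finally {
                fileStore.close();
                executor.shutdown();
            }
        }
    }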

Modified: jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java?rev=1755096&r1=1755095&r2=1755096&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java (original)
+++ jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java Wed Aug  3 16:20:09 2016
@@ -31,6 +31,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 
 import javax.annotation.Nonnull;
 
@@ -52,6 +54,7 @@ import org.apache.jackrabbit.oak.spi.com
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.After;
 import org.junit.Ignore;
 import org.junit.Rule;
@@ -159,8 +162,12 @@ public class ExternalBlobIT {
 
     protected SegmentNodeStore getNodeStore(BlobStore blobStore) throws IOException {
         if (nodeStore == null) {
+            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
+            
             store = fileStoreBuilder(getWorkDir()).withBlobStore(blobStore)
-                    .withMaxFileSize(1).build();
+                    .withMaxFileSize(1)
+                    .withStatisticsProvider(new DefaultStatisticsProvider(executor))
+                    .build();
             nodeStore = SegmentNodeStoreBuilders.builder(store).build();
         }
         return nodeStore;
@@ -271,13 +278,13 @@ public class ExternalBlobIT {
         store.flush();
 
         // blob went to the external store
-        assertTrue(store.size() < 10 * 1024);
+        assertTrue(store.getStats().getApproximateSize() < 10 * 1024);
         close();
 
         SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
         store = fileStoreBuilder(getWorkDir()).withMaxFileSize(1)
                 .withGCOptions(gcOptions).build();
-        assertTrue(store.size() < 10 * 1024);
+        assertTrue(store.getStats().getApproximateSize() < 10 * 1024);
 
         store.compact();
         store.cleanup();

Modified: jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java?rev=1755096&r1=1755095&r2=1755096&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java (original)
+++ jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentCompactionIT.java Wed Aug  3 16:20:09 2016
@@ -52,6 +52,8 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -92,6 +94,7 @@ import org.apache.jackrabbit.oak.spi.sta
 import org.apache.jackrabbit.oak.spi.whiteboard.CompositeRegistration;
 import org.apache.jackrabbit.oak.spi.whiteboard.Registration;
 import org.apache.jackrabbit.oak.stats.Clock;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -229,10 +232,12 @@ public class SegmentCompactionIT {
         }, 1, 1, SECONDS);
 
         SegmentGCOptions gcOptions = defaultGCOptions().setLockWaitTime(lockWaitTime);
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
         fileStore = fileStoreBuilder(folder.getRoot())
                 .withMemoryMapping(true)
                 .withGCMonitor(gcMonitor)
                 .withGCOptions(gcOptions)
+                .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                 .build();
         nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
 
@@ -299,7 +304,7 @@ public class SegmentCompactionIT {
         scheduler.scheduleAtFixedRate(new Runnable() {
             @Override
             public void run() {
-                fileStoreSize = fileStore.size();
+                fileStoreSize = fileStore.getStats().getApproximateSize();
             }
         }, 1, 1, MINUTES);
     }
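
SegmentCompactionIT keeps sampling the store size once a minute rather than
querying it on every access, which matches the new Javadoc advice that size()
should not be called from a tight loop. The general pattern, sketched
standalone (interval and names are arbitrary, not taken from the test):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.atomic.AtomicLong;

    import static java.util.concurrent.TimeUnit.MINUTES;

    class SizeSamplerSketch {
        private final AtomicLong lastKnownSize = new AtomicLong();
        private final ScheduledExecutorService scheduler =
                Executors.newSingleThreadScheduledExecutor();

        interface SizeSource {
            long size();
        }

        // Cache the result of a potentially contended size computation on a
        // schedule instead of recomputing it on every read.
        void start(final SizeSource source) {
            scheduler.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    lastKnownSize.set(source.size());
                }
            }, 1, 1, MINUTES);
        }

        long lastKnownSize() {
            return lastKnownSize.get();
        }
    }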

Modified: jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java?rev=1755096&r1=1755095&r2=1755096&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java (original)
+++ jackrabbit/oak/trunk/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentDataStoreBlobGCIT.java Wed Aug  3 16:20:09 2016
@@ -42,6 +42,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
@@ -75,6 +76,7 @@ import org.apache.jackrabbit.oak.spi.blo
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -105,10 +107,12 @@ public class SegmentDataStoreBlobGCIT {
 
     protected SegmentNodeStore getNodeStore(BlobStore blobStore) throws IOException {
         if (nodeStore == null) {
+            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
             FileStoreBuilder builder = fileStoreBuilder(getWorkDir())
                     .withBlobStore(blobStore)
                     .withMaxFileSize(256)
                     .withMemoryMapping(false)
+                    .withStatisticsProvider(new DefaultStatisticsProvider(executor))
                     .withGCOptions(gcOptions);
             store = builder.build();
             nodeStore = SegmentNodeStoreBuilders.builder(store).build();
@@ -144,7 +148,7 @@ public class SegmentDataStoreBlobGCIT {
         }
         nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
 
-        final long dataSize = store.size();
+        final long dataSize = store.getStats().getApproximateSize();
         log.info("File store dataSize {}", byteCountToDisplaySize(dataSize));
 
         // 2. Now remove the nodes to generate garbage