Posted to commits@geode.apache.org by sa...@apache.org on 2016/05/23 20:15:58 UTC

incubator-geode git commit: initial commit

Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-93 [created] 8694c5582


initial commit


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8694c558
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8694c558
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8694c558

Branch: refs/heads/feature/GEODE-93
Commit: 8694c558227badcc273538bfca0b2d156199860a
Parents: 74b1f1e
Author: Sai Boorlagadda <sb...@pivotal.io>
Authored: Mon May 23 13:10:47 2016 -0700
Committer: Sai Boorlagadda <sb...@pivotal.io>
Committed: Mon May 23 13:10:47 2016 -0700

----------------------------------------------------------------------
 .../internal/cache/AbstractLRURegionMap.java    |   1 +
 .../gemfire/internal/cache/DiskEntry.java       |  45 +++++---
 .../gemfire/internal/cache/DiskRegionStats.java |   4 +
 .../gemfire/internal/cache/LocalRegion.java     |   8 +-
 .../cache/PartitionedRegionStatsJUnitTest.java  | 102 ++++++++++++++++---
 5 files changed, 126 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8694c558/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractLRURegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractLRURegionMap.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractLRURegionMap.java
index 14c431d..c59b4f7 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractLRURegionMap.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractLRURegionMap.java
@@ -302,6 +302,7 @@ public abstract class AbstractLRURegionMap extends AbstractRegionMap {
           return 0;
         }
         entry.setEvicted();
+        System.out.println(" >>>>> evictEntry:: " + entry.getKey());
         change = DiskEntry.Helper.overflowToDisk((DiskEntry)entry, region, _getCCHelper());
       }
       boolean result = change < 0;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8694c558/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
index 54ecb04..b530f8b 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
@@ -1070,15 +1070,26 @@ public interface DiskEntry extends RegionEntry {
           boolean wasEvicted = le.testEvicted();
           le.unsetEvicted();
           if (!Token.isRemovedFromDisk(newValue)) {
-            if (oldValue == null
-                // added null check for bug 41759
-                || wasEvicted && did != null && did.isPendingAsync()) {
-              // Note we do not append this entry because that will be
-              // done by lruEntryUpdate
+            System.out.println("OldValue:" + oldValue + ", NewValue:" + newValue);
+            if (oldValue == null && newValue == Token.TOMBSTONE) {
+              //dr.incNumEntriesInVM(1L);
+              dr.incNumOverflowOnDisk(-1L);
+              dr.incNumOverflowBytesOnDisk(-oldValueLength);
+              incrementBucketStats(region, 0/*InVM*/, -1/*OnDisk*/, -oldValueLength);
+            } else if (oldValue == null && newValue != Token.TOMBSTONE) {
               dr.incNumEntriesInVM(1L);
               dr.incNumOverflowOnDisk(-1L);
               dr.incNumOverflowBytesOnDisk(-oldValueLength);
               incrementBucketStats(region, 1/*InVM*/, -1/*OnDisk*/, -oldValueLength);
+            } else if(oldValue != null && newValue == Token.TOMBSTONE) {
+              dr.incNumEntriesInVM(-1L);
+              if(dr.isBackup()) {
+                dr.incNumOverflowBytesOnDisk(-oldValueLength);
+                incrementBucketStats(region, -1/*InVM*/, 0/*OnDisk*/, -oldValueLength);
+              }
+            } else if(oldValue == Token.TOMBSTONE && newValue != Token.TOMBSTONE) {
+              dr.incNumEntriesInVM(1L);
+              incrementBucketStats(region, 1/*InVM*/, 0/*OnDisk*/, 0/*overflowBytesOnDisk*/);
             }
           }
         }
@@ -1239,10 +1250,10 @@ public interface DiskEntry extends RegionEntry {
                 did.setPendingAsync(false);
               }
               // since it was evicted fix the stats here
-              dr.incNumEntriesInVM(1L);
-              dr.incNumOverflowOnDisk(-1L);
+              //dr.incNumEntriesInVM(1L);
+              //dr.incNumOverflowOnDisk(-1L);
               // no need to dec overflowBytesOnDisk because it was not inced in this case.
-              incrementBucketStats(region, 1/*InVM*/, -1/*OnDisk*/, 0);
+              //incrementBucketStats(region, 1/*InVM*/, -1/*OnDisk*/, 0);
             }
             lruEntryFaultIn((LRUEntry) entry, region);
             lruFaultedIn = true;
@@ -1480,6 +1491,7 @@ public interface DiskEntry extends RegionEntry {
                                              int entriesInVmDelta,
                                              int overflowOnDiskDelta,
                                              int overflowBytesOnDiskDelta) {
+      System.out.println(">>>>>>> incrementBucketStats:: entriesInVmDelta:" + entriesInVmDelta + ", overflowOnDiskDelta:" + overflowOnDiskDelta + ", overflowBytesOnDiskDelta:" + overflowBytesOnDiskDelta);
       if (owner instanceof BucketRegion) {
         ((BucketRegion)owner).incNumEntriesInVM(entriesInVmDelta);
         ((BucketRegion)owner).incNumOverflowOnDisk(overflowOnDiskDelta);
@@ -1550,7 +1562,7 @@ public interface DiskEntry extends RegionEntry {
             // and now we are faulting it out
           }
         }
-
+        System.out.println("overflowToDisk::: entry:" + entry.getKey() + ", wasAlreadyPendingAsync:" + wasAlreadyPendingAsync + ", scheduledAsyncHere:" + scheduledAsyncHere);
         boolean movedValueToDisk = false; // added for bug 41849
         
         // If async then if it does not need to be written (because it already was)
@@ -1568,6 +1580,7 @@ public interface DiskEntry extends RegionEntry {
           }finally {
             entry.afterValueOverflow(region);
           }
+          System.out.println("overflowToDisk::: entry:" + entry.getKey() + " is set to null");
           movedValueToDisk = true;
           change = ((LRUClockNode)entry).updateEntrySize(ccHelper);
         }
@@ -1575,10 +1588,12 @@ public interface DiskEntry extends RegionEntry {
         if (movedValueToDisk) {
           valueLength = getValueLength(did);
         }
-        dr.incNumEntriesInVM(-1L);
-        dr.incNumOverflowOnDisk(1L);
-        dr.incNumOverflowBytesOnDisk(valueLength);
-        incrementBucketStats(region, -1/*InVM*/, 1/*OnDisk*/, valueLength);
+        if(dr.isSync() || movedValueToDisk) {
+          dr.incNumEntriesInVM(-1L);
+          dr.incNumOverflowOnDisk(1L);
+          dr.incNumOverflowBytesOnDisk(valueLength);
+          incrementBucketStats(region, -1/*InVM*/, 1/*OnDisk*/, valueLength);
+        }
       }
       } finally {
         dr.releaseReadLock();
@@ -1766,8 +1781,10 @@ public interface DiskEntry extends RegionEntry {
                 region.updateSizeOnEvict(entry.getKey(), entryValSize);
                 // note the old size was already accounted for
                 // onDisk was already inced so just do the valueLength here
+                dr.incNumEntriesInVM(-1);
+                dr.incNumOverflowOnDisk(1L);
                 dr.incNumOverflowBytesOnDisk(did.getValueLength());
-                incrementBucketStats(region, 0/*InVM*/, 0/*OnDisk*/,
+                incrementBucketStats(region, 0/*InVM*/, 1/*OnDisk*/,
                                      did.getValueLength());
                 try {
                  entry.handleValueOverflow(region);
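
For reference, a minimal self-contained sketch of the state-transition accounting
the DiskEntry.Helper hunks above appear to implement. The names OverflowStatsSketch,
EntryState, StatDeltas and transition() are hypothetical and not from this commit;
the real code mutates DiskRegionStats and BucketRegion counters directly, and the
isBackup()-only byte adjustment and the async/pending-write cases are omitted here
for brevity.

public class OverflowStatsSketch {

  enum EntryState { IN_VM, ON_DISK, TOMBSTONE }

  /** Deltas applied to the three disk/bucket counters for one entry transition. */
  static final class StatDeltas {
    final long entriesInVM;
    final long overflowOnDisk;
    final long overflowBytesOnDisk;
    StatDeltas(long inVM, long onDisk, long bytes) {
      this.entriesInVM = inVM;
      this.overflowOnDisk = onDisk;
      this.overflowBytesOnDisk = bytes;
    }
    @Override
    public String toString() {
      return "inVM=" + entriesInVM + ", onDisk=" + overflowOnDisk
          + ", bytesOnDisk=" + overflowBytesOnDisk;
    }
  }

  /** Counter deltas when an entry moves between states; valueLength is the old on-disk size. */
  static StatDeltas transition(EntryState from, EntryState to, long valueLength) {
    if (from == EntryState.ON_DISK && to == EntryState.TOMBSTONE) {
      // value leaves disk, but a tombstone does not count as an entry in VM
      return new StatDeltas(0, -1, -valueLength);
    }
    if (from == EntryState.ON_DISK && to == EntryState.IN_VM) {
      // an update (or fault-in) brings the value back into memory
      return new StatDeltas(+1, -1, -valueLength);
    }
    if (from == EntryState.IN_VM && to == EntryState.TOMBSTONE) {
      // the commit additionally adjusts overflow bytes when the region is a backup; omitted here
      return new StatDeltas(-1, 0, 0);
    }
    if (from == EntryState.TOMBSTONE && to == EntryState.IN_VM) {
      return new StatDeltas(+1, 0, 0);
    }
    return new StatDeltas(0, 0, 0);
  }

  public static void main(String[] args) {
    // e.g. an entry overflowed to disk (42 bytes) is updated with a live value in memory
    System.out.println(transition(EntryState.ON_DISK, EntryState.IN_VM, 42));
  }
}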

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8694c558/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
index 75cdf21..0db01cd 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskRegionStats.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.StatisticsFactory;
 import com.gemstone.gemfire.StatisticsType;
 import com.gemstone.gemfire.StatisticsTypeFactory;
 import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
+import com.gemstone.gemfire.internal.logging.LogService;
 
 /**
  * GemFire statistics about a {@link DiskRegion}.
@@ -235,6 +236,7 @@ public class DiskRegionStats {
    * overflowed to disk by a given amount.
    */
   public void incNumOverflowOnDisk(long delta) {
+    LogService.getLogger().info(">>>>>>>>>>> DiskRegionStats::incNumOverflowOnDisk:::" + getNumOverflowOnDisk() + ",delta=" + delta);
     this.stats.incLong(numOverflowOnDiskId, delta);
   }
 
@@ -243,6 +245,7 @@ public class DiskRegionStats {
    * overflowed to disk by a given amount.
    */
   public void incNumEntriesInVM(long delta) {
+    LogService.getLogger().info(">>>>>>>>>>> DiskRegionStats::incNumEntriesInVM:::" + getNumEntriesInVM() + ",delta=" + delta);
     this.stats.incLong(numEntriesInVMId, delta);
   }
   
@@ -251,6 +254,7 @@ public class DiskRegionStats {
    * overflowed to disk by a given amount.
    */
   public void incNumOverflowBytesOnDisk(long delta) {
+    LogService.getLogger().info(">>>>>>>>>>> DiskRegionStats::incNumOverflowBytesOnDisk:::" + getNumOverflowBytesOnDisk() + ",delta=" + delta);
     this.stats.incLong(numOverflowBytesOnDiskId, delta);
   }
 
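
As context for the logging added above: these stats are plain delta counters, so
every increment on one code path has to be matched by a corresponding decrement on
the opposite path for the totals to stay correct. A minimal stand-alone sketch of
that pattern, using java.util.concurrent.atomic.AtomicLong as a hypothetical
stand-in for the GemFire Statistics object the real class delegates to:

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-in for DiskRegionStats: the getters are only meaningful if
// every positive delta is eventually paired with an equal negative delta.
class DeltaCounterSketch {
  private final AtomicLong numEntriesInVM = new AtomicLong();
  private final AtomicLong numOverflowOnDisk = new AtomicLong();
  private final AtomicLong numOverflowBytesOnDisk = new AtomicLong();

  void incNumEntriesInVM(long delta)         { numEntriesInVM.addAndGet(delta); }
  void incNumOverflowOnDisk(long delta)      { numOverflowOnDisk.addAndGet(delta); }
  void incNumOverflowBytesOnDisk(long delta) { numOverflowBytesOnDisk.addAndGet(delta); }

  long getNumEntriesInVM()          { return numEntriesInVM.get(); }
  long getNumOverflowOnDisk()       { return numOverflowOnDisk.get(); }
  long getNumOverflowBytesOnDisk()  { return numOverflowBytesOnDisk.get(); }
}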

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8694c558/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
index d28496c..286fef8 100644
--- a/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
@@ -3438,10 +3438,10 @@ public class LocalRegion extends AbstractRegion
     //Fix for 45204 - don't include the tombstones in
     //any of our entry count stats.
     this.cachePerfStats.incEntryCount(-delta);
-    if(getDiskRegion() != null) {
-      getDiskRegion().incNumEntriesInVM(-delta);
-    }
-    DiskEntry.Helper.incrementBucketStats(this, -delta/*InVM*/, 0/*OnDisk*/, 0);
+    //if(getDiskRegion() != null) {
+    //  getDiskRegion().incNumEntriesInVM(-delta);
+    //}
+    //DiskEntry.Helper.incrementBucketStats(this, -delta/*InVM*/, 0/*OnDisk*/, 0);
   }
   
   public int getTombstoneCount() {
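
A hedged reading of why these decrements are commented out (inferred from the
DiskEntry.Helper hunks above, not stated in the commit message): if both the
tombstone path here and the entry-update path in DiskEntry.Helper subtract the
same entry from numEntriesInVM, the counter drifts low. A tiny hypothetical
illustration:

// Illustration only; the class name and values below are not from the commit.
public class DoubleDecrementSketch {
  public static void main(String[] args) {
    long numEntriesInVM = 10;            // ten live entries held in memory
    numEntriesInVM += -1;                // DiskEntry.Helper: IN_VM -> TOMBSTONE transition
    numEntriesInVM += -1;                // LocalRegion tombstone path (now commented out)
    System.out.println(numEntriesInVM);  // prints 8, although only one entry became a tombstone
  }
}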

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8694c558/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
index 1a3277c..768ad8d 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsJUnitTest.java
@@ -332,11 +332,11 @@ public class PartitionedRegionStatsJUnitTest
 
     
     int numEntries = 0;
-    
+    System.out.println(">>>>> put(0)");
     pr.put(0, 0);
     numEntries++;
     pr.getDiskStore().flush();
-    
+    System.out.println(">>>>>> Flush");
     long singleEntryMemSize = stats.getLong("dataStoreBytesInUse");
     assertEquals(1 , stats.getInt("dataStoreEntryCount"));
     assertEquals(0 , diskStats.getNumOverflowBytesOnDisk());
@@ -358,6 +358,17 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
     
     assertTrue(entryOverflowSize > 0);
+    System.out.println("Total entries in VM:::::::");
+    countEntriesInMem(pr);
+    System.out.println(">>>>>>>>>>> GET(0)");
+    pr.get(1);
+    assertEquals(singleEntryMemSize, stats.getLong("dataStoreBytesInUse"));
+    assertEquals(2 , stats.getInt("dataStoreEntryCount"));
+    assertEquals(1 , diskStats.getNumEntriesInVM());
+    assertEquals(1 , diskStats.getNumOverflowOnDisk());
+    assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
+    assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
+    
     
     for(; numEntries < pr.getTotalNumberOfBuckets() * 5; numEntries++) {
       pr.put(numEntries, numEntries);
@@ -386,13 +397,14 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals((numEntries -1) , diskStats.getNumOverflowOnDisk());
     assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
-    
+
     //Get some entries to trigger evictions
     for(int i = 0; i < numEntries / 2; i++) {
+      countEntriesInMem(pr);
+      System.out.println(">>>>>>>>>> Get(" + i + ") <<<<<<<<<<<<<<<<<<<<<<<<");
       pr.get(i);
     }
     pr.getDiskStore().flush();
-    
     assertEquals(singleEntryMemSize, stats.getLong("dataStoreBytesInUse"));
     assertEquals(numEntries , stats.getInt("dataStoreEntryCount"));
     assertEquals((numEntries -1) * entryOverflowSize, diskStats.getNumOverflowBytesOnDisk());
@@ -428,16 +440,34 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals((numEntries -1) , diskStats.getNumOverflowOnDisk());
     assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
-    
+    int entriesInMem = 1;
+
    //Put get put - seems to leave entry in memory?
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
+    System.out.println(">>>>>>>>>>> PUT(update): 10");
     pr.put(10, 11);
-    pr.get(10);
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());    
+    System.out.println(">>>>>>>>>>> DELETE: 10");
+    pr.remove(10);
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
+    System.out.println(">>>>>>>>>>> PUT(update): 10");
     pr.put(10, 12);
     
     pr.getDiskStore().flush();
     
-    int entriesInMem = 1;
-    
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
     assertEquals(singleEntryMemSize * entriesInMem, stats.getLong("dataStoreBytesInUse"));
     assertEquals(numEntries , stats.getInt("dataStoreEntryCount"));
     assertEquals((numEntries - entriesInMem) * entryOverflowSize, diskStats.getNumOverflowBytesOnDisk());
@@ -446,21 +476,58 @@ public class PartitionedRegionStatsJUnitTest
     assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
     assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
     
+    
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());    
+    System.out.println(">>>>>>>>>>> DELETE: 10");
+    pr.remove(10);
+    numEntries--;
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
+    
+    pr.getDiskStore().flush();
+    
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
+    assertEquals(singleEntryMemSize * entriesInMem, stats.getLong("dataStoreBytesInUse"));
+    assertEquals(numEntries , stats.getInt("dataStoreEntryCount"));
+    assertEquals((numEntries - entriesInMem) * entryOverflowSize, diskStats.getNumOverflowBytesOnDisk());
+    assertEquals(entriesInMem , diskStats.getNumEntriesInVM());
+    assertEquals((numEntries - entriesInMem) , diskStats.getNumOverflowOnDisk());
+    assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
+    assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
     //Do some random operations
-
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
     System.out.println("----Doing random operations");
     Random rand = new Random(12345L);
     for(int i =0; i < 1000; i++) {
+      System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+          ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+          ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+          ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk() +
+          ", getDiskBytesFromBucketStats - " + getDiskBytes(pr));
       int key = rand.nextInt(numEntries);
       int op = rand.nextInt(3);
       switch(op) {
         case 0:
+          System.out.println(">>>>>>>>>>> PUT(update): " + key);
           pr.put(key, rand.nextInt());
           break;
         case 1:
+          System.out.println(">>>>>>>>>>> GET: " + key);
           pr.get(key);
           break;
         case 2:
+          System.out.println(">>>>>>>>>>> REMOVE: " + key);
           pr.remove(key);
           break;
       }
@@ -469,18 +536,21 @@ public class PartitionedRegionStatsJUnitTest
     pr.getDiskStore().flush();
     
     System.out.println("----Done with random operations");
-
+    System.out.println(">>>>>>> Total Entries: pr.entryCount() - " + pr.entryCount() + ", dataStoreEntryCount - " + stats.getInt("dataStoreEntryCount") +
+        ", getNumEntriesInVM - " + diskStats.getNumEntriesInVM() + 
+        ", getNumOverflowOnDisk:" + diskStats.getNumOverflowOnDisk() + 
+        ", getNumOverflowBytesOnDisk - " + diskStats.getNumOverflowBytesOnDisk());
+    
     numEntries = pr.entryCount();
         
     assertEquals(singleEntryMemSize * entriesInMem, stats.getLong("dataStoreBytesInUse"));
     assertEquals(numEntries , stats.getInt("dataStoreEntryCount"));
     assertEquals((numEntries - entriesInMem) * entryOverflowSize, diskStats.getNumOverflowBytesOnDisk());
-    //Disabled for GEODE-93. numEntriesInVM and numOVerflowOnDisk are incorrect
-//    assertIndexDetailsEquals(entriesInMem , diskStats.getNumEntriesInVM());
-//    assertIndexDetailsEquals((numEntries - entriesInMem) , diskStats.getNumOverflowOnDisk());
-      assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
-      assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
-    }
+    assertEquals(entriesInMem , diskStats.getNumEntriesInVM());
+    assertEquals((numEntries - entriesInMem) , diskStats.getNumOverflowOnDisk());
+    assertEquals(stats.getLong("dataStoreBytesInUse"), getMemBytes(pr));
+    assertEquals(diskStats.getNumOverflowBytesOnDisk(), getDiskBytes(pr));
+  }
 
   private int countEntriesInMem(PartitionedRegion pr) {
     int entriesInMem = 0;