You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@geode.apache.org by ds...@apache.org on 2015/11/24 01:17:22 UTC
[1/5] incubator-geode git commit: moved reference count debugging
code to ReferenceCountHelper
Repository: incubator-geode
Updated Branches:
refs/heads/feature/GEODE-580 3e7da9371 -> 479faec7e
moved reference count debugging code to ReferenceCountHelper
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/5ef5150b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/5ef5150b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/5ef5150b
Branch: refs/heads/feature/GEODE-580
Commit: 5ef5150b35dcdc6bff73a766cdd3f742a4844952
Parents: 3e7da93
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Nov 23 11:46:40 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Nov 23 11:46:40 2015 -0800
----------------------------------------------------------------------
.../internal/cache/AbstractRegionEntry.java | 19 +-
.../internal/cache/AbstractRegionMap.java | 14 +-
.../gemfire/internal/cache/DiskEntry.java | 6 +-
.../gemfire/internal/cache/EntryEventImpl.java | 42 ++--
.../gemfire/internal/cache/LocalRegion.java | 8 +-
.../gemstone/gemfire/internal/cache/Oplog.java | 6 +-
.../cache/wan/GatewaySenderEventImpl.java | 6 +-
.../gemfire/internal/offheap/Chunk.java | 20 +-
.../gemfire/internal/offheap/OffHeapHelper.java | 8 +-
.../offheap/OffHeapRegionEntryHelper.java | 6 +-
.../internal/offheap/RefCountChangeInfo.java | 4 +-
.../internal/offheap/ReferenceCountHelper.java | 235 +++++++++++++++++++
.../offheap/SimpleMemoryAllocatorImpl.java | 177 +-------------
.../gemfire/internal/cache/OffHeapTestUtil.java | 3 +-
14 files changed, 311 insertions(+), 243 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
index 97c2371..dd33b15 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionEntry.java
@@ -67,6 +67,7 @@ import com.gemstone.gemfire.internal.offheap.GemFireChunk;
import com.gemstone.gemfire.internal.offheap.MemoryAllocator;
import com.gemstone.gemfire.internal.offheap.OffHeapCachedDeserializable;
import com.gemstone.gemfire.internal.offheap.OffHeapHelper;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
import com.gemstone.gemfire.internal.offheap.StoredObject;
import com.gemstone.gemfire.internal.offheap.annotations.Released;
@@ -415,7 +416,7 @@ public abstract class AbstractRegionEntry implements RegionEntry,
@Override
public Object getValue(RegionEntryContext context) {
- SimpleMemoryAllocatorImpl.createReferenceCountOwner();
+ ReferenceCountHelper.createReferenceCountOwner();
@Retained Object result = _getValueRetain(context, true);
//Asif: If the thread is an Index Creation Thread & the value obtained is
//Token.REMOVED , we can skip synchronization block. This is required to prevent
@@ -431,11 +432,11 @@ public abstract class AbstractRegionEntry implements RegionEntry,
// }
if (Token.isRemoved(result)) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
return null;
} else {
result = OffHeapHelper.copyAndReleaseIfNeeded(result); // sqlf does not dec ref count in this call
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
setRecentlyUsed();
return result;
}
@@ -564,14 +565,14 @@ public abstract class AbstractRegionEntry implements RegionEntry,
@Retained
public final Object getValueInVM(RegionEntryContext context) {
- SimpleMemoryAllocatorImpl.createReferenceCountOwner();
+ ReferenceCountHelper.createReferenceCountOwner();
@Retained Object v = _getValueRetain(context, true);
if (v == null) { // should only be possible if disk entry
v = Token.NOT_AVAILABLE;
}
@Retained Object result = OffHeapHelper.copyAndReleaseIfNeeded(v); // TODO OFFHEAP keep it offheap?
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
return result;
}
@@ -758,9 +759,9 @@ public abstract class AbstractRegionEntry implements RegionEntry,
// :ezoerner:20080814 We also read old value from disk or buffer
// in the case where there is a non-null expectedOldValue
// see PartitionedRegion#remove(Object key, Object value)
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
@Retained @Released Object curValue = _getValueRetain(region, true);
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
try {
if (curValue == null) curValue = Token.NOT_AVAILABLE;
@@ -1343,10 +1344,10 @@ public abstract class AbstractRegionEntry implements RegionEntry,
}
byte[] compressedData = compressBytes(r, data);
boolean isCompressed = compressedData != data;
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(this);
+ ReferenceCountHelper.setReferenceCountOwner(this);
MemoryAllocator ma = SimpleMemoryAllocatorImpl.getAllocator(); // fix for bug 47875
val = ma.allocateAndInitialize(compressedData, isSerialized, isCompressed, GemFireChunk.TYPE); // TODO:KIRK:48068 race happens right after this line
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
if (val instanceof GemFireChunk) {
val = new com.gemstone.gemfire.internal.offheap.ChunkWithHeapForm((GemFireChunk)val, data);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
index 5a2a89e..6d49d74 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/AbstractRegionMap.java
@@ -77,7 +77,7 @@ import com.gemstone.gemfire.internal.logging.log4j.LogMarker;
import com.gemstone.gemfire.internal.offheap.Chunk;
import com.gemstone.gemfire.internal.offheap.OffHeapHelper;
import com.gemstone.gemfire.internal.offheap.OffHeapRegionEntryHelper;
-import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.annotations.Released;
import com.gemstone.gemfire.internal.offheap.annotations.Retained;
import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
@@ -3357,11 +3357,11 @@ RETRY_LOOP:
// replace is propagated to server, so no need to check
// satisfiesOldValue on client
if (expectedOldValue != null && !replaceOnClient) {
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
@Retained @Released Object v = re._getValueRetain(event.getLocalRegion(), true);
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
try {
if (!AbstractRegionEntry.checkExpectedOldValue(expectedOldValue, v, event.getLocalRegion())) {
return false;
@@ -3392,9 +3392,9 @@ RETRY_LOOP:
if (event.hasDelta() || event.getOperation().guaranteesOldValue()
|| GemFireCacheImpl.sqlfSystem()) {
// In these cases we want to even get the old value from disk if it is not in memory
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
@Released Object oldValueInVMOrDisk = re.getValueOffHeapOrDiskWithoutFaultIn(event.getLocalRegion());
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
try {
event.setOldValue(oldValueInVMOrDisk, requireOldValue
|| GemFireCacheImpl.sqlfSystem());
@@ -3403,11 +3403,11 @@ RETRY_LOOP:
}
} else {
// In these cases only need the old value if it is in memory
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
@Retained @Released Object oldValueInVM = re._getValueRetain(event.getLocalRegion(), true); // OFFHEAP: re synced so can use its ref.
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
try {
event.setOldValue(oldValueInVM,
requireOldValue || GemFireCacheImpl.sqlfSystem());
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
index b64b6a0..c855cca 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/DiskEntry.java
@@ -42,8 +42,8 @@ import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.logging.LogService;
import com.gemstone.gemfire.internal.offheap.Chunk;
import com.gemstone.gemfire.internal.offheap.OffHeapHelper;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.Releasable;
-import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
import com.gemstone.gemfire.internal.offheap.UnsafeMemoryChunk;
import com.gemstone.gemfire.internal.offheap.StoredObject;
import com.gemstone.gemfire.internal.offheap.annotations.Released;
@@ -340,9 +340,9 @@ public interface DiskEntry extends RegionEntry {
synchronized (syncObj) {
entry.setLastModified(mgr, de.getLastModified());
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(entry);
+ ReferenceCountHelper.setReferenceCountOwner(entry);
v = de._getValueRetain(context, true); // OFFHEAP copied to heap entry; todo allow entry to refer to offheap since it will be copied to network.
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
if (v == null) {
if (did == null) {
// fix for bug 41449
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
index fe9a4ac..9cf2f13 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/EntryEventImpl.java
@@ -76,8 +76,8 @@ import com.gemstone.gemfire.internal.logging.log4j.LogMarker;
import com.gemstone.gemfire.internal.offheap.Chunk;
import com.gemstone.gemfire.internal.offheap.OffHeapHelper;
import com.gemstone.gemfire.internal.offheap.OffHeapRegionEntryHelper;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.Releasable;
-import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
import com.gemstone.gemfire.internal.offheap.StoredObject;
import com.gemstone.gemfire.internal.offheap.annotations.Released;
import com.gemstone.gemfire.internal.offheap.annotations.Retained;
@@ -931,13 +931,13 @@ public class EntryEventImpl
OffHeapHelper.releaseAndTrackOwner(this.newValue, this);
}
if (v instanceof Chunk) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(this);
+ ReferenceCountHelper.setReferenceCountOwner(this);
if (!((Chunk) v).retain()) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
this.newValue = null;
return;
}
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
}
this.newValue = v;
this.cachedSerializedNewValue = null;
@@ -993,7 +993,7 @@ public class EntryEventImpl
if (v == curOldValue) return;
if (this.offHeapOk) {
if (curOldValue instanceof Chunk) {
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
+ if (ReferenceCountHelper.trackReferenceCounts()) {
OffHeapHelper.releaseAndTrackOwner(curOldValue, new OldValueOwner());
} else {
OffHeapHelper.release(curOldValue);
@@ -1009,10 +1009,10 @@ public class EntryEventImpl
if (v == this.oldValue) return;
if (v instanceof Chunk) {
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(new OldValueOwner());
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.setReferenceCountOwner(new OldValueOwner());
boolean couldNotRetain = (!((Chunk) v).retain());
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
if (couldNotRetain) {
this.oldValue = null;
return;
@@ -1738,14 +1738,14 @@ public class EntryEventImpl
|| GemFireCacheImpl.sqlfSystem()
) {
@Retained Object ov;
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(new OldValueOwner());
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.setReferenceCountOwner(new OldValueOwner());
if (GemFireCacheImpl.sqlfSystem()) {
ov = reentry.getValueOffHeapOrDiskWithoutFaultIn(this.region);
} else {
ov = reentry._getValueRetain(owner, true);
}
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
} else {
if (GemFireCacheImpl.sqlfSystem()) {
ov = reentry.getValueOffHeapOrDiskWithoutFaultIn(this.region);
@@ -2088,9 +2088,9 @@ public class EntryEventImpl
try {
RegionEntry re = this.region.getRegionEntry(getKey());
if (re == null) return false;
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
Object v = re._getValueRetain(this.region, true);
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
try {
return setOldValue(v);
} finally {
@@ -3055,10 +3055,10 @@ public class EntryEventImpl
if (ov instanceof Chunk) {
//this.region.getCache().getLogger().info("DEBUG freeing ref to old value on " + System.identityHashCode(ov));
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(new OldValueOwner());
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.setReferenceCountOwner(new OldValueOwner());
((Chunk) ov).release();
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
} else {
((Chunk) ov).release();
}
@@ -3086,19 +3086,19 @@ public class EntryEventImpl
public void copyOffHeapToHeap() {
Object ov = basicGetOldValue();
if (ov instanceof Chunk) {
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(new OldValueOwner());
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.setReferenceCountOwner(new OldValueOwner());
this.oldValue = OffHeapHelper.copyAndReleaseIfNeeded(ov);
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
} else {
this.oldValue = OffHeapHelper.copyAndReleaseIfNeeded(ov);
}
}
Object nv = basicGetNewValue();
if (nv instanceof Chunk) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(this);
+ ReferenceCountHelper.setReferenceCountOwner(this);
this.newValue = OffHeapHelper.copyAndReleaseIfNeeded(nv);
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
}
if (this.newValue instanceof Chunk || this.oldValue instanceof Chunk) {
throw new IllegalStateException("event's old/new value still off-heap after calling copyOffHeapToHeap");
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
index 9d88834..231913b 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/LocalRegion.java
@@ -205,7 +205,7 @@ import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
import com.gemstone.gemfire.internal.logging.log4j.LogMarker;
import com.gemstone.gemfire.internal.offheap.Chunk;
import com.gemstone.gemfire.internal.offheap.OffHeapHelper;
-import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.StoredObject;
import com.gemstone.gemfire.internal.offheap.annotations.Retained;
import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
@@ -2178,14 +2178,14 @@ public class LocalRegion extends AbstractRegion
RegionEntry entry = this.entries.getEntry(keyInfo.getKey());
boolean result = entry != null;
if (result) {
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
Object val = entry.getTransformedValue(); // no need to decompress since we only want to know if we have an existing value
if (val instanceof StoredObject) {
OffHeapHelper.release(val);
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
return true;
}
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
// No need to to check CachedDeserializable because of Bruce's fix in r30960 for bug 42162. See bug 42732.
// this works because INVALID and LOCAL_INVALID will never be faulted out of mem
// If val is NOT_AVAILABLE that means we have a valid value on disk.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
index cd197f2..3a2d1ed 100755
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/Oplog.java
@@ -108,7 +108,7 @@ import com.gemstone.gemfire.internal.logging.LogService;
import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
import com.gemstone.gemfire.internal.logging.log4j.LogMarker;
import com.gemstone.gemfire.internal.offheap.OffHeapHelper;
-import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.StoredObject;
import com.gemstone.gemfire.internal.offheap.annotations.Released;
import com.gemstone.gemfire.internal.offheap.annotations.Retained;
@@ -4310,10 +4310,10 @@ public final class Oplog implements CompactableOplog, Flushable {
DiskId did = entry.getDiskId();
byte userBits = 0;
long oplogOffset = did.getOffsetInOplog();
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
// TODO OFFHEAP: no need to retain. We just use it while we have the RegionEntry synced.
@Retained @Released Object value = entry._getValueRetain(dr, true);
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
// TODO:KIRK:OK Object value = entry.getValueWithContext(dr);
boolean foundData = false;
if (value == null) {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderEventImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderEventImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderEventImpl.java
index f7bf976..4df8f35 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderEventImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/wan/GatewaySenderEventImpl.java
@@ -53,8 +53,8 @@ import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.offheap.Chunk;
import com.gemstone.gemfire.internal.offheap.ChunkWithHeapForm;
import com.gemstone.gemfire.internal.offheap.OffHeapHelper;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.Releasable;
-import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
import com.gemstone.gemfire.internal.offheap.StoredObject;
import com.gemstone.gemfire.internal.offheap.annotations.OffHeapIdentifier;
import com.gemstone.gemfire.internal.offheap.annotations.Released;
@@ -935,9 +935,9 @@ public class GatewaySenderEventImpl implements
if (event.hasDelta()) {
this.valueIsObject = 0x02;
} else {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(this);
+ ReferenceCountHelper.setReferenceCountOwner(this);
so = event.getOffHeapNewValue();
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
// TODO OFFHEAP MERGE: check for a cached serialized value first
// so we can use it instead of reading offheap
// If we do read offheap then add the serialize new value to the event cache
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/Chunk.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/Chunk.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/Chunk.java
index d2c79c0..e32a1c6 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/Chunk.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/Chunk.java
@@ -671,8 +671,8 @@ import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
}
} while (!UnsafeMemoryChunk.writeAbsoluteIntVolatile(memAddr+REF_COUNT_OFFSET, rawBits, rawBits+1));
//debugLog("use inced ref count " + (uc+1) + " @" + Long.toHexString(memAddr), true);
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
- SimpleMemoryAllocatorImpl.refCountChanged(memAddr, false, uc+1);
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.refCountChanged(memAddr, false, uc+1);
}
return true;
@@ -686,14 +686,14 @@ import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
returnToAllocator = false;
rawBits = UnsafeMemoryChunk.readAbsoluteIntVolatile(memAddr+REF_COUNT_OFFSET);
if ((rawBits&MAGIC_MASK) != MAGIC_NUMBER) {
- String msg = "It looks like off heap memory @" + Long.toHexString(memAddr) + " was already freed. rawBits=" + Integer.toHexString(rawBits) + " history=" + SimpleMemoryAllocatorImpl.getFreeRefCountInfo(memAddr);
+ String msg = "It looks like off heap memory @" + Long.toHexString(memAddr) + " was already freed. rawBits=" + Integer.toHexString(rawBits) + " history=" + ReferenceCountHelper.getFreeRefCountInfo(memAddr);
//debugLog(msg, true);
throw new IllegalStateException(msg);
}
int curCount = rawBits&REF_COUNT_MASK;
if ((curCount) == 0) {
//debugLog("too many frees @" + Long.toHexString(memAddr), true);
- throw new IllegalStateException("Memory has already been freed." + " history=" + SimpleMemoryAllocatorImpl.getFreeRefCountInfo(memAddr) /*+ System.identityHashCode(this)*/);
+ throw new IllegalStateException("Memory has already been freed." + " history=" + ReferenceCountHelper.getFreeRefCountInfo(memAddr) /*+ System.identityHashCode(this)*/);
}
if (curCount == 1) {
newCount = 0; // clear the use count, bits, and the delta size since it will be freed.
@@ -716,11 +716,11 @@ import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
}
*/
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
- if (SimpleMemoryAllocatorImpl.trackFreedReferenceCounts()) {
- SimpleMemoryAllocatorImpl.refCountChanged(memAddr, true, newCount&REF_COUNT_MASK);
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ if (ReferenceCountHelper.trackFreedReferenceCounts()) {
+ ReferenceCountHelper.refCountChanged(memAddr, true, newCount&REF_COUNT_MASK);
}
- SimpleMemoryAllocatorImpl.freeRefCountInfo(memAddr);
+ ReferenceCountHelper.freeRefCountInfo(memAddr);
}
// Use fill pattern for free list data integrity check.
@@ -730,8 +730,8 @@ import com.gemstone.gemfire.internal.offheap.annotations.Unretained;
SimpleMemoryAllocatorImpl.getAllocator().freeChunk(memAddr);
} else {
- if (SimpleMemoryAllocatorImpl.trackReferenceCounts()) {
- SimpleMemoryAllocatorImpl.refCountChanged(memAddr, true, newCount&REF_COUNT_MASK);
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.refCountChanged(memAddr, true, newCount&REF_COUNT_MASK);
}
}
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapHelper.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapHelper.java
index b5677cd..4845931 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapHelper.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapHelper.java
@@ -106,9 +106,9 @@ public class OffHeapHelper {
*/
public static boolean releaseWithNoTracking(@Released Object o) {
if (o instanceof MemoryChunkWithRefCount) {
- SimpleMemoryAllocatorImpl.skipRefCountTracking();
+ ReferenceCountHelper.skipRefCountTracking();
((MemoryChunkWithRefCount) o).release();
- SimpleMemoryAllocatorImpl.unskipRefCountTracking();
+ ReferenceCountHelper.unskipRefCountTracking();
return true;
} else {
return false;
@@ -121,9 +121,9 @@ public class OffHeapHelper {
*/
public static boolean releaseAndTrackOwner(@Released final Object o, final Object owner) {
if (o instanceof MemoryChunkWithRefCount) {
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(owner);
+ ReferenceCountHelper.setReferenceCountOwner(owner);
((MemoryChunkWithRefCount) o).release();
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
return true;
} else {
return false;
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
index 34c3468..d1a81f0 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionEntryHelper.java
@@ -369,9 +369,9 @@ public class OffHeapRegionEntryHelper {
do {
oldAddress = re.getAddress();
} while (!re.setAddress(oldAddress, newAddress));
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(re);
+ ReferenceCountHelper.setReferenceCountOwner(re);
releaseAddress(oldAddress);
- SimpleMemoryAllocatorImpl.setReferenceCountOwner(null);
+ ReferenceCountHelper.setReferenceCountOwner(null);
}
public static Token getValueAsToken(@Unretained OffHeapRegionEntry re) {
@@ -420,7 +420,7 @@ public class OffHeapRegionEntryHelper {
long addr2 = re.getAddress();
retryCount++;
if (retryCount > 100) {
- throw new IllegalStateException("retain failed addr=" + addr + " addr2=" + addr + " 100 times" + " history=" + SimpleMemoryAllocatorImpl.getFreeRefCountInfo(addr));
+ throw new IllegalStateException("retain failed addr=" + addr + " addr2=" + addr + " 100 times" + " history=" + ReferenceCountHelper.getFreeRefCountInfo(addr));
}
addr = addr2;
// Since retain returned false our region entry should have a different
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
index 47cc139..56cab97 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/RefCountChangeInfo.java
@@ -31,11 +31,11 @@ public class RefCountChangeInfo extends Throwable {
private final Object owner;
private int dupCount;
- public RefCountChangeInfo(boolean decRefCount, int rc) {
+ public RefCountChangeInfo(boolean decRefCount, int rc, Object owner) {
super(decRefCount ? "FREE" : "USED");
this.threadName = Thread.currentThread().getName();
this.rc = rc;
- this.owner = SimpleMemoryAllocatorImpl.refCountOwner.get();
+ this.owner = owner;
}
public Object getOwner() {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/ReferenceCountHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/ReferenceCountHelper.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/ReferenceCountHelper.java
new file mode 100644
index 0000000..85a0505
--- /dev/null
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/ReferenceCountHelper.java
@@ -0,0 +1,235 @@
+package com.gemstone.gemfire.internal.offheap;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.gemstone.gemfire.internal.cache.RegionEntry;
+
+/**
+ * This class provides static methods to help
+ * debug off-heap reference count problems.
+ * To enable reference count tracking set: -Dgemfire.trackOffHeapRefCounts=true.
+ * To enable free operation tracking set: -Dgemfire.trackOffHeapFreedRefCounts=true.
+ */
+public class ReferenceCountHelper {
+ private ReferenceCountHelper() {
+ // no instances allowed
+ }
+ final static private boolean trackRefCounts = Boolean.getBoolean("gemfire.trackOffHeapRefCounts");
+ final static private boolean trackFreedRefCounts = Boolean.getBoolean("gemfire.trackOffHeapFreedRefCounts");
+ final static private ConcurrentMap<Long, List<RefCountChangeInfo>> stacktraces;
+ final static private ConcurrentMap<Long, List<RefCountChangeInfo>> freedStacktraces;
+ final static private ThreadLocal<Object> refCountOwner;
+ final static private ThreadLocal<AtomicInteger> refCountReenterCount;
+ final static private Object SKIP_REF_COUNT_TRACKING = new Object();
+ final static private List<RefCountChangeInfo> LOCKED = Collections.emptyList();
+
+ static {
+ if (trackRefCounts) {
+ stacktraces = new ConcurrentHashMap<Long, List<RefCountChangeInfo>>();
+ if (trackFreedRefCounts) {
+ freedStacktraces = new ConcurrentHashMap<Long, List<RefCountChangeInfo>>();
+ } else {
+ freedStacktraces = null;
+ }
+ refCountOwner = new ThreadLocal<Object>();
+ refCountReenterCount = new ThreadLocal<AtomicInteger>();
+ } else {
+ stacktraces = null;
+ freedStacktraces = null;
+ refCountOwner = null;
+ refCountReenterCount = null;
+ }
+ }
+
+ /**
+ * Returns true if reference count tracking is enabled.
+ */
+ public static boolean trackReferenceCounts() {
+ return trackRefCounts;
+ }
+
+ /**
+ * Returns true if free operation tracking is enabled.
+ */
+ public static boolean trackFreedReferenceCounts() {
+ return trackFreedRefCounts;
+ }
+
+ /**
+ * Optional call to tell the tracker the logical "owner"
+ * of the reference count. For example you could set
+ * the particular EntryEventImpl instance that incremented
+ * the reference count and is responsible for decrementing it.
+ * Calling this method is a noop if !trackReferenceCounts.
+ */
+ public static void setReferenceCountOwner(Object owner) {
+ if (trackReferenceCounts()) {
+ if (refCountOwner.get() != null) {
+ AtomicInteger ai = refCountReenterCount.get();
+ if (owner != null) {
+ ai.incrementAndGet();
+ } else {
+ if (ai.decrementAndGet() <= 0) {
+ refCountOwner.set(null);
+ ai.set(0);
+ }
+ }
+ } else {
+ AtomicInteger ai = refCountReenterCount.get();
+ if (ai == null) {
+ ai = new AtomicInteger(0);
+ refCountReenterCount.set(ai);
+ }
+ if (owner != null) {
+ ai.set(1);
+ } else {
+ ai.set(0);
+ }
+ refCountOwner.set(owner);
+ }
+ }
+ }
+
+ /**
+ * Create, set, and return a generic reference count owner object.
+ * Calling this method is a noop and returns null if !trackReferenceCounts.
+ */
+ public static Object createReferenceCountOwner() {
+ Object result = null;
+ if (trackReferenceCounts()) {
+ result = new Object();
+ setReferenceCountOwner(result);
+ }
+ return result;
+ }
+
+ /**
+ * Call this method before incrementing a reference count
+ * if you know that tracking is not needed because you know
+ * that the allocate and free will always be done in the same
+ * code block.
+ * Callers of this method must also call unskipRefCountTracking
+ * after the allocation or free is done.
+ */
+ public static void skipRefCountTracking() {
+ setReferenceCountOwner(SKIP_REF_COUNT_TRACKING);
+ }
+
+ /**
+ * Call this method to undo a call to skipRefCountTracking.
+ */
+ public static void unskipRefCountTracking() {
+ setReferenceCountOwner(null);
+ }
+
+ /**
+ * Returns a list of any reference count tracking information for
+ * the given Chunk address.
+ */
+ public static List<RefCountChangeInfo> getRefCountInfo(long address) {
+ if (!trackReferenceCounts()) return null;
+ List<RefCountChangeInfo> result = stacktraces.get(address);
+ while (result != null && !stacktraces.replace(address, result, LOCKED)) {
+ result = stacktraces.get(address);
+ }
+ return result;
+ }
+
+ /**
+ * Returns a list of any free operation tracking information.
+ * This is used to describe who did the previous free(s) when an extra one
+ * ends up being done and fails.
+ */
+ public static List<RefCountChangeInfo> getFreeRefCountInfo(long address) {
+ if (!trackReferenceCounts() || !trackFreedReferenceCounts()) return null;
+ return freedStacktraces.get(address);
+ }
+
+ /**
+ * Used internally to report that a reference count has changed.
+ */
+ static void refCountChanged(Long address, boolean decRefCount, int rc) {
+ final Object owner = refCountOwner.get();
+ if (owner == SKIP_REF_COUNT_TRACKING) {
+ return;
+ }
+ List<RefCountChangeInfo> list = stacktraces.get(address);
+ if (list == null) {
+ List<RefCountChangeInfo> newList = new ArrayList<RefCountChangeInfo>();
+ List<RefCountChangeInfo> old = stacktraces.putIfAbsent(address, newList);
+ if (old == null) {
+ list = newList;
+ } else {
+ list = old;
+ }
+ }
+ if (decRefCount) {
+ if (owner != null) {
+ synchronized (list) {
+ for (int i=0; i < list.size(); i++) {
+ RefCountChangeInfo info = list.get(i);
+ if (owner instanceof RegionEntry) {
+ // use identity comparison on region entries since sqlf does some weird stuff in the equals method
+ if (owner == info.getOwner()) {
+ if (info.getDupCount() > 0) {
+ info.decDupCount();
+ } else {
+ list.remove(i);
+ }
+ return;
+ }
+ } else if (owner.equals(info.getOwner())) {
+ if (info.getDupCount() > 0) {
+ info.decDupCount();
+ } else {
+ list.remove(i);
+ }
+ return;
+ }
+ }
+ }
+ }
+ }
+ if (list == LOCKED) {
+ SimpleMemoryAllocatorImpl.debugLog("refCount " + (decRefCount ? "deced" : "inced") + " after orphan detected for @" + Long.toHexString(address), true);
+ return;
+ }
+ RefCountChangeInfo info = new RefCountChangeInfo(decRefCount, rc, owner);
+ synchronized (list) {
+ // if (list.size() == 16) {
+ // debugLog("dumping @" + Long.toHexString(address) + " history=" + list, false);
+ // list.clear();
+ // }
+ for (RefCountChangeInfo e: list) {
+ if (e.isDuplicate(info)) {
+ // No need to add it
+ return;
+ }
+ }
+ list.add(info);
+ }
+ }
+
+ /**
+ * Called internally when free operations are tracked to record
+ * that a free has happened of the given address.
+ */
+ static void freeRefCountInfo(Long address) {
+ if (!trackReferenceCounts()) return;
+ List<RefCountChangeInfo> freedInfo = stacktraces.remove(address);
+ if (freedInfo == LOCKED) {
+ SimpleMemoryAllocatorImpl.debugLog("freed after orphan detected for @" + Long.toHexString(address), true);
+ } else if (trackFreedReferenceCounts()) {
+ if (freedInfo != null) {
+ freedStacktraces.put(address, freedInfo);
+ } else {
+ freedStacktraces.remove(address);
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
index 688fbe0..eacfa06 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
@@ -25,10 +25,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceArray;
@@ -368,8 +365,8 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
//System.out.println("allocating " + size);
Chunk result = this.freeList.allocate(size, chunkType);
//("allocated off heap object of size " + size + " @" + Long.toHexString(result.getMemoryAddress()), true);
- if (trackReferenceCounts()) {
- refCountChanged(result.getMemoryAddress(), false, 1);
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.refCountChanged(result.getMemoryAddress(), false, 1);
}
return result;
}
@@ -396,8 +393,8 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
Chunk result = this.freeList.allocate(v.length, chunkType);
//debugLog("allocated off heap object of size " + v.length + " @" + Long.toHexString(result.getMemoryAddress()), true);
//debugLog("allocated off heap object of size " + v.length + " @" + Long.toHexString(result.getMemoryAddress()) + "chunkSize=" + result.getSize() + " isSerialized=" + isSerialized + " v=" + Arrays.toString(v), true);
- if (trackReferenceCounts()) {
- refCountChanged(result.getMemoryAddress(), false, 1);
+ if (ReferenceCountHelper.trackReferenceCounts()) {
+ ReferenceCountHelper.refCountChanged(result.getMemoryAddress(), false, 1);
}
assert result.getChunkType() == chunkType: "chunkType=" + chunkType + " getChunkType()=" + result.getChunkType();
result.setSerializedValue(v);
@@ -841,172 +838,6 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
*/
final boolean validateMemoryWithFill = Boolean.getBoolean("gemfire.validateOffHeapWithFill");
- private final static boolean trackRefCounts = Boolean.getBoolean("gemfire.trackOffHeapRefCounts");
- private final static boolean trackFreedRefCounts = Boolean.getBoolean("gemfire.trackOffHeapFreedRefCounts");
- private final static ConcurrentMap<Long, List<RefCountChangeInfo>> stacktraces;
- private final static ConcurrentMap<Long, List<RefCountChangeInfo>> freedStacktraces;
- final static ThreadLocal<Object> refCountOwner;
- private final static ThreadLocal<AtomicInteger> refCountReenterCount;
- static {
- if (trackRefCounts) {
- stacktraces = new ConcurrentHashMap<Long, List<RefCountChangeInfo>>();
- if (trackFreedRefCounts) {
- freedStacktraces = new ConcurrentHashMap<Long, List<RefCountChangeInfo>>();
- } else {
- freedStacktraces = null;
- }
- refCountOwner = new ThreadLocal<Object>();
- refCountReenterCount = new ThreadLocal<AtomicInteger>();
- } else {
- stacktraces = null;
- freedStacktraces = null;
- refCountOwner = null;
- refCountReenterCount = null;
- }
- }
-
- public static boolean trackReferenceCounts() {
- return trackRefCounts;
- }
- public static boolean trackFreedReferenceCounts() {
- return trackFreedRefCounts;
- }
- public static void setReferenceCountOwner(Object owner) {
- if (trackReferenceCounts()) {
- if (refCountOwner.get() != null) {
- AtomicInteger ai = refCountReenterCount.get();
- if (owner != null) {
- ai.incrementAndGet();
- } else {
- if (ai.decrementAndGet() <= 0) {
- refCountOwner.set(null);
- ai.set(0);
- }
- }
- } else {
- AtomicInteger ai = refCountReenterCount.get();
- if (ai == null) {
- ai = new AtomicInteger(0);
- refCountReenterCount.set(ai);
- }
- if (owner != null) {
- ai.set(1);
- } else {
- ai.set(0);
- }
- refCountOwner.set(owner);
- }
- }
- }
- public static Object createReferenceCountOwner() {
- Object result = null;
- if (trackReferenceCounts()) {
- result = new Object();
- setReferenceCountOwner(result);
- }
- return result;
- }
-
- private static final Object SKIP_REF_COUNT_TRACKING = new Object();
-
- public static void skipRefCountTracking() {
- setReferenceCountOwner(SKIP_REF_COUNT_TRACKING);
- }
- public static void unskipRefCountTracking() {
- setReferenceCountOwner(null);
- }
-
- static void refCountChanged(Long address, boolean decRefCount, int rc) {
- final Object owner = refCountOwner.get();
- if (owner == SKIP_REF_COUNT_TRACKING) {
- return;
- }
- List<RefCountChangeInfo> list = stacktraces.get(address);
- if (list == null) {
- List<RefCountChangeInfo> newList = new ArrayList<RefCountChangeInfo>();
- List<RefCountChangeInfo> old = stacktraces.putIfAbsent(address, newList);
- if (old == null) {
- list = newList;
- } else {
- list = old;
- }
- }
- if (decRefCount) {
- if (owner != null) {
- synchronized (list) {
- for (int i=0; i < list.size(); i++) {
- RefCountChangeInfo info = list.get(i);
- if (owner instanceof RegionEntry) {
- // use identity comparison on region entries since sqlf does some weird stuff in the equals method
- if (owner == info.getOwner()) {
- if (info.getDupCount() > 0) {
- info.decDupCount();
- } else {
- list.remove(i);
- }
- return;
- }
- } else if (owner.equals(info.getOwner())) {
- if (info.getDupCount() > 0) {
- info.decDupCount();
- } else {
- list.remove(i);
- }
- return;
- }
- }
- }
- }
- }
- if (list == LOCKED) {
- debugLog("refCount " + (decRefCount ? "deced" : "inced") + " after orphan detected for @" + Long.toHexString(address), true);
- return;
- }
- RefCountChangeInfo info = new RefCountChangeInfo(decRefCount, rc);
- synchronized (list) {
-// if (list.size() == 16) {
-// debugLog("dumping @" + Long.toHexString(address) + " history=" + list, false);
-// list.clear();
-// }
- for (RefCountChangeInfo e: list) {
- if (e.isDuplicate(info)) {
- // No need to add it
- return;
- }
- }
- list.add(info);
- }
- }
-
- private static List<RefCountChangeInfo> LOCKED = Collections.emptyList();
-
- public static List<RefCountChangeInfo> getRefCountInfo(long address) {
- if (!trackReferenceCounts()) return null;
- List<RefCountChangeInfo> result = stacktraces.get(address);
- while (result != null && !stacktraces.replace(address, result, LOCKED)) {
- result = stacktraces.get(address);
- }
- return result;
- }
- public static List<RefCountChangeInfo> getFreeRefCountInfo(long address) {
- if (!trackReferenceCounts() || !trackFreedReferenceCounts()) return null;
- return freedStacktraces.get(address);
- }
-
- public static void freeRefCountInfo(Long address) {
- if (!trackReferenceCounts()) return;
- List<RefCountChangeInfo> freedInfo = stacktraces.remove(address);
- if (freedInfo == LOCKED) {
- debugLog("freed after orphan detected for @" + Long.toHexString(address), true);
- } else if (trackFreedReferenceCounts()) {
- if (freedInfo != null) {
- freedStacktraces.put(address, freedInfo);
- } else {
- freedStacktraces.remove(address);
- }
- }
- }
-
/** Used by tests to stress off-heap memory compaction.
*
*/
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/5ef5150b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
index b14d815..948c7f8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
@@ -24,6 +24,7 @@ import junit.framework.Assert;
import com.gemstone.gemfire.cache.CacheClosedException;
import com.gemstone.gemfire.internal.offheap.MemoryBlock;
import com.gemstone.gemfire.internal.offheap.RefCountChangeInfo;
+import com.gemstone.gemfire.internal.offheap.ReferenceCountHelper;
import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
@SuppressWarnings("deprecation")
@@ -51,7 +52,7 @@ public class OffHeapTestUtil {
}
if(orphans != null && ! orphans.isEmpty()) {
- List<RefCountChangeInfo> info = SimpleMemoryAllocatorImpl.getRefCountInfo(orphans.get(0).getMemoryAddress());
+ List<RefCountChangeInfo> info = ReferenceCountHelper.getRefCountInfo(orphans.get(0).getMemoryAddress());
System.out.println("FOUND ORPHAN!!");
System.out.println("Sample orphan: " + orphans.get(0));
System.out.println("Orphan info: " + info);
[5/5] incubator-geode git commit: more cleanup of FreeListManager
Posted by ds...@apache.org.
more cleanup of FreeListManager
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/479faec7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/479faec7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/479faec7
Branch: refs/heads/feature/GEODE-580
Commit: 479faec7e88ddb741dd86d3405e7c9d5ced2665e
Parents: 8e7fabf
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Nov 23 16:17:06 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Nov 23 16:17:06 2015 -0800
----------------------------------------------------------------------
.../gemstone/gemfire/internal/offheap/FreeListManager.java | 8 ++++----
.../gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java | 6 ------
2 files changed, 4 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/479faec7/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
index d0a77bb..e6bc282 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
@@ -40,7 +40,7 @@ import com.gemstone.gemfire.internal.offheap.MemoryBlock.State;
public class FreeListManager {
final private AtomicReferenceArray<SyncChunkStack> tinyFreeLists = new AtomicReferenceArray<SyncChunkStack>(SimpleMemoryAllocatorImpl.TINY_FREE_LIST_COUNT);
// hugeChunkSet is sorted by chunk size in ascending order. It will only contain chunks larger than MAX_TINY.
- final ConcurrentSkipListSet<Chunk> hugeChunkSet = new ConcurrentSkipListSet<Chunk>();
+ private final ConcurrentSkipListSet<Chunk> hugeChunkSet = new ConcurrentSkipListSet<Chunk>();
private final AtomicLong allocatedSize = new AtomicLong(0L);
private int getNearestTinyMultiple(int size) {
@@ -84,7 +84,7 @@ public class FreeListManager {
public long getUsedMemory() {
return this.allocatedSize.get();
}
- long getFreeMemory() {
+ public long getFreeMemory() {
return this.ma.getTotalMemory() - getUsedMemory();
}
long getFreeFragmentMemory() {
@@ -119,7 +119,7 @@ public class FreeListManager {
* The id of the last fragment we allocated from.
*/
private final AtomicInteger lastFragmentAllocation = new AtomicInteger(0);
- final CopyOnWriteArrayList<Fragment> fragmentList;
+ private final CopyOnWriteArrayList<Fragment> fragmentList;
private final SimpleMemoryAllocatorImpl ma;
public FreeListManager(SimpleMemoryAllocatorImpl ma) {
@@ -268,7 +268,7 @@ public class FreeListManager {
* Or to prevent it from happening we could just check the incoming slabs and throw away a few bytes
* to keep them from being contiguous.
*/
- boolean compact(int chunkSize) {
+ private boolean compact(int chunkSize) {
final long startCompactionTime = this.ma.getStats().startCompaction();
final int countPreSync = this.compactCount.get();
try {
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/479faec7/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
index e94bf4d..f16253e 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
@@ -724,10 +724,4 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
*/
final boolean validateMemoryWithFill = Boolean.getBoolean("gemfire.validateOffHeapWithFill");
- /** Used by tests to stress off-heap memory compaction.
- *
- */
- public static void forceCompaction() {
- getAllocator().freeList.compact(0);
- }
}
[3/5] incubator-geode git commit: fixed analyze serializables for
off-heap
Posted by ds...@apache.org.
fixed analyze serializables for off-heap
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/e70e7359
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/e70e7359
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/e70e7359
Branch: refs/heads/feature/GEODE-580
Commit: e70e7359a9e6ced8e2ab15e870d5515b57a4a10e
Parents: 3c998ad
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Nov 23 16:03:39 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Nov 23 16:03:39 2015 -0800
----------------------------------------------------------------------
.../resources/com/gemstone/gemfire/codeAnalysis/excludedClasses.txt | 1 +
.../com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt | 1 -
2 files changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e70e7359/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/excludedClasses.txt
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/excludedClasses.txt b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/excludedClasses.txt
index 060a5ec..ce49654 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/excludedClasses.txt
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/excludedClasses.txt
@@ -19,6 +19,7 @@ com/gemstone/gemfire/internal/logging/log4j/LocalizedMessage
com/gemstone/gemfire/internal/logging/log4j/LogWriterAppenders
com/gemstone/gemfire/internal/logging/log4j/LogWriterAppenders$Identifier
com/gemstone/gemfire/internal/logging/log4j/LogWriterLogger
+com/gemstone/gemfire/internal/offheap/RefCountChangeInfo
com/gemstone/gemfire/internal/process/BlockingProcessStreamReader
com/gemstone/gemfire/internal/process/NonBlockingProcessStreamReader
com/gemstone/gemfire/internal/process/ProcessStreamReader
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/e70e7359/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
index b9252f7..2e38d9d 100644
--- a/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
+++ b/gemfire-core/src/test/resources/com/gemstone/gemfire/codeAnalysis/sanctionedSerializables.txt
@@ -470,7 +470,6 @@ com/gemstone/gemfire/internal/memcached/commands/ClientError,true,-2426928000696
com/gemstone/gemfire/internal/offheap/MemoryBlock$State,false
com/gemstone/gemfire/internal/offheap/OffHeapStorage$1,false
com/gemstone/gemfire/internal/offheap/OffHeapStorage$2,false
-com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl$RefCountChangeInfo,false,dupCount:int,owner:java/lang/Object,rc:int,stackTraceString:java/lang/String,threadName:java/lang/String
com/gemstone/gemfire/internal/offheap/annotations/OffHeapIdentifier,false,id:java/lang/String
com/gemstone/gemfire/internal/process/ConnectionFailedException,true,5622636452836752700
com/gemstone/gemfire/internal/process/FileAlreadyExistsException,true,5471082555536094256
[4/5] incubator-geode git commit: moved free list code to
FreeListManager
Posted by ds...@apache.org.
moved free list code to FreeListManager
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8e7fabf0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8e7fabf0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8e7fabf0
Branch: refs/heads/feature/GEODE-580
Commit: 8e7fabf0590cb29cbf15e5280192f3f8ff890f28
Parents: e70e735
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Nov 23 16:09:02 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Nov 23 16:09:02 2015 -0800
----------------------------------------------------------------------
.../internal/offheap/FreeListManager.java | 1245 ++++++++++--------
.../offheap/SimpleMemoryAllocatorImpl.java | 118 +-
2 files changed, 692 insertions(+), 671 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e7fabf0/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
index fc0f950..d0a77bb 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
@@ -18,6 +18,9 @@ package com.gemstone.gemfire.internal.offheap;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
import java.util.List;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;
@@ -29,627 +32,759 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
import com.gemstone.gemfire.LogWriter;
import com.gemstone.gemfire.OutOfOffHeapMemoryException;
import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.internal.offheap.MemoryBlock.State;
/**
* Manages the free lists for a SimpleMemoryAllocatorImpl
*/
public class FreeListManager {
- final AtomicReferenceArray<SyncChunkStack> tinyFreeLists = new AtomicReferenceArray<SyncChunkStack>(SimpleMemoryAllocatorImpl.TINY_FREE_LIST_COUNT);
- // hugeChunkSet is sorted by chunk size in ascending order. It will only contain chunks larger than MAX_TINY.
- final ConcurrentSkipListSet<Chunk> hugeChunkSet = new ConcurrentSkipListSet<Chunk>();
- private final AtomicLong allocatedSize = new AtomicLong(0L);
-
- private int getNearestTinyMultiple(int size) {
- return (size-1)/SimpleMemoryAllocatorImpl.TINY_MULTIPLE;
- }
- public List<Chunk> getLiveChunks() {
- ArrayList<Chunk> result = new ArrayList<Chunk>();
- UnsafeMemoryChunk[] slabs = this.ma.getSlabs();
- for (int i=0; i < slabs.length; i++) {
- getLiveChunks(slabs[i], result);
- }
- return result;
- }
- private void getLiveChunks(UnsafeMemoryChunk slab, List<Chunk> result) {
- long addr = slab.getMemoryAddress();
- while (addr <= (slab.getMemoryAddress() + slab.getSize() - Chunk.MIN_CHUNK_SIZE)) {
- Fragment f = isAddrInFragmentFreeSpace(addr);
- if (f != null) {
- addr = f.getMemoryAddress() + f.getSize();
- } else {
- int curChunkSize = Chunk.getSize(addr);
- int refCount = Chunk.getRefCount(addr);
- if (refCount > 0) {
- result.add(this.ma.chunkFactory.newChunk(addr));
- }
- addr += curChunkSize;
+ final private AtomicReferenceArray<SyncChunkStack> tinyFreeLists = new AtomicReferenceArray<SyncChunkStack>(SimpleMemoryAllocatorImpl.TINY_FREE_LIST_COUNT);
+ // hugeChunkSet is sorted by chunk size in ascending order. It will only contain chunks larger than MAX_TINY.
+ final ConcurrentSkipListSet<Chunk> hugeChunkSet = new ConcurrentSkipListSet<Chunk>();
+ private final AtomicLong allocatedSize = new AtomicLong(0L);
+
+ private int getNearestTinyMultiple(int size) {
+ return (size-1)/SimpleMemoryAllocatorImpl.TINY_MULTIPLE;
+ }
+ List<Chunk> getLiveChunks() {
+ ArrayList<Chunk> result = new ArrayList<Chunk>();
+ UnsafeMemoryChunk[] slabs = this.ma.getSlabs();
+ for (int i=0; i < slabs.length; i++) {
+ getLiveChunks(slabs[i], result);
+ }
+ return result;
+ }
+ private void getLiveChunks(UnsafeMemoryChunk slab, List<Chunk> result) {
+ long addr = slab.getMemoryAddress();
+ while (addr <= (slab.getMemoryAddress() + slab.getSize() - Chunk.MIN_CHUNK_SIZE)) {
+ Fragment f = isAddrInFragmentFreeSpace(addr);
+ if (f != null) {
+ addr = f.getMemoryAddress() + f.getSize();
+ } else {
+ int curChunkSize = Chunk.getSize(addr);
+ int refCount = Chunk.getRefCount(addr);
+ if (refCount > 0) {
+ result.add(this.ma.chunkFactory.newChunk(addr));
}
+ addr += curChunkSize;
}
}
- /**
- * If addr is in the free space of a fragment then return that fragment; otherwise return null.
- */
- private Fragment isAddrInFragmentFreeSpace(long addr) {
- for (Fragment f: this.fragmentList) {
- if (addr >= (f.getMemoryAddress() + f.getFreeIndex()) && addr < (f.getMemoryAddress() + f.getSize())) {
- return f;
- }
+ }
+ /**
+ * If addr is in the free space of a fragment then return that fragment; otherwise return null.
+ */
+ private Fragment isAddrInFragmentFreeSpace(long addr) {
+ for (Fragment f: this.fragmentList) {
+ if (addr >= (f.getMemoryAddress() + f.getFreeIndex()) && addr < (f.getMemoryAddress() + f.getSize())) {
+ return f;
}
- return null;
- }
- public long getUsedMemory() {
- return this.allocatedSize.get();
- }
- public long getFreeMemory() {
- return this.ma.getTotalMemory() - getUsedMemory();
}
- public long getFreeFragmentMemory() {
- long result = 0;
- for (Fragment f: this.fragmentList) {
- int freeSpace = f.freeSpace();
- if (freeSpace >= Chunk.MIN_CHUNK_SIZE) {
- result += freeSpace;
- }
- }
- return result;
- }
- public long getFreeTinyMemory() {
- long tinyFree = 0;
- for (int i=0; i < this.tinyFreeLists.length(); i++) {
- SyncChunkStack cl = this.tinyFreeLists.get(i);
- if (cl != null) {
- tinyFree += cl.computeTotalSize();
- }
+ return null;
+ }
+ public long getUsedMemory() {
+ return this.allocatedSize.get();
+ }
+ long getFreeMemory() {
+ return this.ma.getTotalMemory() - getUsedMemory();
+ }
+ long getFreeFragmentMemory() {
+ long result = 0;
+ for (Fragment f: this.fragmentList) {
+ int freeSpace = f.freeSpace();
+ if (freeSpace >= Chunk.MIN_CHUNK_SIZE) {
+ result += freeSpace;
}
- return tinyFree;
}
- public long getFreeHugeMemory() {
- long hugeFree = 0;
- for (Chunk c: this.hugeChunkSet) {
- hugeFree += c.getSize();
- }
- return hugeFree;
- }
-
- /**
- * The id of the last fragment we allocated from.
- */
- private final AtomicInteger lastFragmentAllocation = new AtomicInteger(0);
- final CopyOnWriteArrayList<Fragment> fragmentList;
- private final SimpleMemoryAllocatorImpl ma;
-
- public FreeListManager(SimpleMemoryAllocatorImpl ma) {
- this.ma = ma;
- UnsafeMemoryChunk[] slabs = ma.getSlabs();
- Fragment[] tmp = new Fragment[slabs.length];
- for (int i=0; i < slabs.length; i++) {
- tmp[i] = new Fragment(slabs[i].getMemoryAddress(), slabs[i].getSize());
- }
- this.fragmentList = new CopyOnWriteArrayList<Fragment>(tmp);
-
- if(ma.validateMemoryWithFill) {
- fillFragments();
+ return result;
+ }
+ long getFreeTinyMemory() {
+ long tinyFree = 0;
+ for (int i=0; i < this.tinyFreeLists.length(); i++) {
+ SyncChunkStack cl = this.tinyFreeLists.get(i);
+ if (cl != null) {
+ tinyFree += cl.computeTotalSize();
}
}
-
- /**
- * Fills all fragments with a fill used for data integrity validation.
- */
- private void fillFragments() {
- for(Fragment fragment : this.fragmentList) {
- fragment.fill();
- }
+ return tinyFree;
+ }
+ long getFreeHugeMemory() {
+ long hugeFree = 0;
+ for (Chunk c: this.hugeChunkSet) {
+ hugeFree += c.getSize();
}
-
- /**
- * Allocate a chunk of memory of at least the given size.
- * The basic algorithm is:
- * 1. Look for a previously allocated and freed chunk close to the size requested.
- * 2. See if the original chunk is big enough to split. If so do so.
- * 3. Look for a previously allocated and freed chunk of any size larger than the one requested.
- * If we find one split it.
- * <p>
- * It might be better not to include step 3 since we expect and freed chunk to be reallocated in the future.
- * Maybe it would be better for 3 to look for adjacent free blocks that can be merged together.
- * For now we will just try 1 and 2 and then report out of mem.
- * @param size minimum bytes the returned chunk must have.
- * @param chunkType TODO
- * @return the allocated chunk
- * @throws IllegalStateException if a chunk can not be allocated.
- */
- @SuppressWarnings("synthetic-access")
- public Chunk allocate(int size, ChunkType chunkType) {
- Chunk result = null;
- {
- assert size > 0;
- if (chunkType == null) {
- chunkType = GemFireChunk.TYPE;
- }
- result = basicAllocate(size, true, chunkType);
- result.setDataSize(size);
- }
- this.ma.stats.incObjects(1);
- int resultSize = result.getSize();
- this.allocatedSize.addAndGet(resultSize);
- this.ma.stats.incUsedMemory(resultSize);
- this.ma.stats.incFreeMemory(-resultSize);
- result.initializeUseCount();
- this.ma.notifyListeners();
-
- return result;
- }
-
- private Chunk basicAllocate(int size, boolean useSlabs, ChunkType chunkType) {
- if (useSlabs) {
- // Every object stored off heap has a header so we need
- // to adjust the size so that the header gets allocated.
- // If useSlabs is false then the incoming size has already
- // been adjusted.
- size += Chunk.OFF_HEAP_HEADER_SIZE;
- }
- if (size <= SimpleMemoryAllocatorImpl.MAX_TINY) {
- return allocateTiny(size, useSlabs, chunkType);
- } else {
- return allocateHuge(size, useSlabs, chunkType);
- }
- }
-
- private Chunk allocateFromFragments(int chunkSize, ChunkType chunkType) {
- do {
- final int lastAllocationId = this.lastFragmentAllocation.get();
- for (int i=lastAllocationId; i < this.fragmentList.size(); i++) {
- Chunk result = allocateFromFragment(i, chunkSize, chunkType);
- if (result != null) {
- return result;
- }
- }
- for (int i=0; i < lastAllocationId; i++) {
- Chunk result = allocateFromFragment(i, chunkSize, chunkType);
- if (result != null) {
- return result;
- }
- }
- } while (compact(chunkSize));
- // We tried all the fragments and didn't find any free memory.
- logOffHeapState(chunkSize);
- final OutOfOffHeapMemoryException failure = new OutOfOffHeapMemoryException("Out of off-heap memory. Could not allocate size of " + chunkSize);
- try {
- throw failure;
- } finally {
- this.ma.ooohml.outOfOffHeapMemory(failure);
- }
+ return hugeFree;
+ }
+
+ /**
+ * The id of the last fragment we allocated from.
+ */
+ private final AtomicInteger lastFragmentAllocation = new AtomicInteger(0);
+ final CopyOnWriteArrayList<Fragment> fragmentList;
+ private final SimpleMemoryAllocatorImpl ma;
+
+ public FreeListManager(SimpleMemoryAllocatorImpl ma) {
+ this.ma = ma;
+ UnsafeMemoryChunk[] slabs = ma.getSlabs();
+ Fragment[] tmp = new Fragment[slabs.length];
+ for (int i=0; i < slabs.length; i++) {
+ tmp[i] = new Fragment(slabs[i].getMemoryAddress(), slabs[i].getSize());
+ }
+ this.fragmentList = new CopyOnWriteArrayList<Fragment>(tmp);
+
+ if(ma.validateMemoryWithFill) {
+ fillFragments();
}
-
- private void logOffHeapState(int chunkSize) {
- if (InternalDistributedSystem.getAnyInstance() != null) {
- LogWriter lw = InternalDistributedSystem.getAnyInstance().getLogWriter();
- lw.info("OutOfOffHeapMemory allocating size of " + chunkSize + ". allocated=" + this.allocatedSize.get() + " compactions=" + this.compactCount.get() + " objects=" + this.ma.stats.getObjects() + " free=" + this.ma.stats.getFreeMemory() + " fragments=" + this.ma.stats.getFragments() + " largestFragment=" + this.ma.stats.getLargestFragment() + " fragmentation=" + this.ma.stats.getFragmentation());
- logFragmentState(lw);
- logTinyState(lw);
- logHugeState(lw);
+ }
+
+ /**
+ * Fills all fragments with a fill used for data integrity validation.
+ */
+ private void fillFragments() {
+ for(Fragment fragment : this.fragmentList) {
+ fragment.fill();
+ }
+ }
+
+ /**
+ * Allocate a chunk of memory of at least the given size.
+ * The basic algorithm is:
+ * 1. Look for a previously allocated and freed chunk close to the size requested.
+ * 2. See if the original chunk is big enough to split. If so do so.
+ * 3. Look for a previously allocated and freed chunk of any size larger than the one requested.
+ * If we find one split it.
+ * <p>
+ * It might be better not to include step 3 since we expect and freed chunk to be reallocated in the future.
+ * Maybe it would be better for 3 to look for adjacent free blocks that can be merged together.
+ * For now we will just try 1 and 2 and then report out of mem.
+ * @param size minimum bytes the returned chunk must have.
+ * @param chunkType TODO
+ * @return the allocated chunk
+ * @throws IllegalStateException if a chunk can not be allocated.
+ */
+ @SuppressWarnings("synthetic-access")
+ public Chunk allocate(int size, ChunkType chunkType) {
+ Chunk result = null;
+ {
+ assert size > 0;
+ if (chunkType == null) {
+ chunkType = GemFireChunk.TYPE;
}
- }
+ result = basicAllocate(size, true, chunkType);
+ result.setDataSize(size);
+ }
+ this.ma.stats.incObjects(1);
+ int resultSize = result.getSize();
+ this.allocatedSize.addAndGet(resultSize);
+ this.ma.stats.incUsedMemory(resultSize);
+ this.ma.stats.incFreeMemory(-resultSize);
+ result.initializeUseCount();
+ this.ma.notifyListeners();
- private void logHugeState(LogWriter lw) {
- for (Chunk c: this.hugeChunkSet) {
- lw.info("Free huge of size " + c.getSize());
+ return result;
+ }
+
+ private Chunk basicAllocate(int size, boolean useSlabs, ChunkType chunkType) {
+ if (useSlabs) {
+ // Every object stored off heap has a header so we need
+ // to adjust the size so that the header gets allocated.
+ // If useSlabs is false then the incoming size has already
+ // been adjusted.
+ size += Chunk.OFF_HEAP_HEADER_SIZE;
+ }
+ if (size <= SimpleMemoryAllocatorImpl.MAX_TINY) {
+ return allocateTiny(size, useSlabs, chunkType);
+ } else {
+ return allocateHuge(size, useSlabs, chunkType);
+ }
+ }
+
+ private Chunk allocateFromFragments(int chunkSize, ChunkType chunkType) {
+ do {
+ final int lastAllocationId = this.lastFragmentAllocation.get();
+ for (int i=lastAllocationId; i < this.fragmentList.size(); i++) {
+ Chunk result = allocateFromFragment(i, chunkSize, chunkType);
+ if (result != null) {
+ return result;
+ }
}
- }
- private void logTinyState(LogWriter lw) {
- for (int i=0; i < this.tinyFreeLists.length(); i++) {
- SyncChunkStack cl = this.tinyFreeLists.get(i);
- if (cl != null) {
- cl.logSizes(lw, "Free tiny of size ");
+ for (int i=0; i < lastAllocationId; i++) {
+ Chunk result = allocateFromFragment(i, chunkSize, chunkType);
+ if (result != null) {
+ return result;
}
}
+ } while (compact(chunkSize));
+ // We tried all the fragments and didn't find any free memory.
+ logOffHeapState(chunkSize);
+ final OutOfOffHeapMemoryException failure = new OutOfOffHeapMemoryException("Out of off-heap memory. Could not allocate size of " + chunkSize);
+ try {
+ throw failure;
+ } finally {
+ this.ma.ooohml.outOfOffHeapMemory(failure);
+ }
+ }
+
+ private void logOffHeapState(int chunkSize) {
+ if (InternalDistributedSystem.getAnyInstance() != null) {
+ LogWriter lw = InternalDistributedSystem.getAnyInstance().getLogWriter();
+ lw.info("OutOfOffHeapMemory allocating size of " + chunkSize + ". allocated=" + this.allocatedSize.get() + " compactions=" + this.compactCount.get() + " objects=" + this.ma.stats.getObjects() + " free=" + this.ma.stats.getFreeMemory() + " fragments=" + this.ma.stats.getFragments() + " largestFragment=" + this.ma.stats.getLargestFragment() + " fragmentation=" + this.ma.stats.getFragmentation());
+ logFragmentState(lw);
+ logTinyState(lw);
+ logHugeState(lw);
+ }
+ }
+
+ private void logHugeState(LogWriter lw) {
+ for (Chunk c: this.hugeChunkSet) {
+ lw.info("Free huge of size " + c.getSize());
+ }
+ }
+ private void logTinyState(LogWriter lw) {
+ for (int i=0; i < this.tinyFreeLists.length(); i++) {
+ SyncChunkStack cl = this.tinyFreeLists.get(i);
+ if (cl != null) {
+ cl.logSizes(lw, "Free tiny of size ");
+ }
}
- private void logFragmentState(LogWriter lw) {
- for (Fragment f: this.fragmentList) {
- int freeSpace = f.freeSpace();
- if (freeSpace > 0) {
- lw.info("Fragment at " + f.getMemoryAddress() + " of size " + f.getSize() + " has " + freeSpace + " bytes free.");
- }
+ }
+ private void logFragmentState(LogWriter lw) {
+ for (Fragment f: this.fragmentList) {
+ int freeSpace = f.freeSpace();
+ if (freeSpace > 0) {
+ lw.info("Fragment at " + f.getMemoryAddress() + " of size " + f.getSize() + " has " + freeSpace + " bytes free.");
}
}
+ }
- private final AtomicInteger compactCount = new AtomicInteger();
- /**
- * Compacts memory and returns true if enough memory to allocate chunkSize
- * is freed. Otherwise returns false;
- * TODO OFFHEAP: what should be done about contiguous chunks that end up being bigger than 2G?
- * Currently if we are given slabs bigger than 2G or that just happen to be contiguous and add
- * up to 2G then the compactor may unify them together into a single Chunk and our 32-bit chunkSize
- * field will overflow. This code needs to detect this and just create a chunk of 2G and then start
- * a new one.
- * Or to prevent it from happening we could just check the incoming slabs and throw away a few bytes
- * to keep them from being contiguous.
- */
- boolean compact(int chunkSize) {
- final long startCompactionTime = this.ma.getStats().startCompaction();
- final int countPreSync = this.compactCount.get();
- try {
- synchronized (this) {
- if (this.compactCount.get() != countPreSync) {
- // someone else did a compaction while we waited on the sync.
- // So just return true causing the caller to retry the allocation.
- return true;
- }
- ArrayList<SyncChunkStack> freeChunks = new ArrayList<SyncChunkStack>();
- collectFreeChunks(freeChunks);
- final int SORT_ARRAY_BLOCK_SIZE = 128;
- long[] sorted = new long[SORT_ARRAY_BLOCK_SIZE];
- int sortedSize = 0;
- boolean result = false;
- int largestFragment = 0;
- for (SyncChunkStack l: freeChunks) {
- long addr = l.poll();
- while (addr != 0) {
- int idx = Arrays.binarySearch(sorted, 0, sortedSize, addr);
- //System.out.println("DEBUG addr=" + addr + " size=" + Chunk.getSize(addr) + " idx="+idx + " sortedSize=" + sortedSize);
- if (idx >= 0) {
- throw new IllegalStateException("duplicate memory address found during compaction!");
- }
- idx = -idx;
- idx--;
- if (idx == sortedSize) {
- // addr is > everything in the array
- if (sortedSize == 0) {
- // nothing was in the array
- sorted[0] = addr;
- sortedSize++;
+ private final AtomicInteger compactCount = new AtomicInteger();
+ /**
+ * Compacts memory and returns true if enough memory to allocate chunkSize
+ * is freed. Otherwise returns false;
+ * TODO OFFHEAP: what should be done about contiguous chunks that end up being bigger than 2G?
+ * Currently if we are given slabs bigger than 2G or that just happen to be contiguous and add
+ * up to 2G then the compactor may unify them together into a single Chunk and our 32-bit chunkSize
+ * field will overflow. This code needs to detect this and just create a chunk of 2G and then start
+ * a new one.
+ * Or to prevent it from happening we could just check the incoming slabs and throw away a few bytes
+ * to keep them from being contiguous.
+ */
+ boolean compact(int chunkSize) {
+ final long startCompactionTime = this.ma.getStats().startCompaction();
+ final int countPreSync = this.compactCount.get();
+ try {
+ synchronized (this) {
+ if (this.compactCount.get() != countPreSync) {
+ // someone else did a compaction while we waited on the sync.
+ // So just return true causing the caller to retry the allocation.
+ return true;
+ }
+ ArrayList<SyncChunkStack> freeChunks = new ArrayList<SyncChunkStack>();
+ collectFreeChunks(freeChunks);
+ final int SORT_ARRAY_BLOCK_SIZE = 128;
+ long[] sorted = new long[SORT_ARRAY_BLOCK_SIZE];
+ int sortedSize = 0;
+ boolean result = false;
+ int largestFragment = 0;
+ for (SyncChunkStack l: freeChunks) {
+ long addr = l.poll();
+ while (addr != 0) {
+ int idx = Arrays.binarySearch(sorted, 0, sortedSize, addr);
+ //System.out.println("DEBUG addr=" + addr + " size=" + Chunk.getSize(addr) + " idx="+idx + " sortedSize=" + sortedSize);
+ if (idx >= 0) {
+ throw new IllegalStateException("duplicate memory address found during compaction!");
+ }
+ idx = -idx;
+ idx--;
+ if (idx == sortedSize) {
+ // addr is > everything in the array
+ if (sortedSize == 0) {
+ // nothing was in the array
+ sorted[0] = addr;
+ sortedSize++;
+ } else {
+ // see if we can conflate into sorted[idx]
+ long lowAddr = sorted[idx-1];
+ int lowSize = Chunk.getSize(lowAddr);
+ if (lowAddr + lowSize == addr) {
+ // append the addr chunk to lowAddr
+ Chunk.setSize(lowAddr, lowSize + Chunk.getSize(addr));
} else {
- // see if we can conflate into sorted[idx]
+ if (sortedSize >= sorted.length) {
+ long[] newSorted = new long[sorted.length+SORT_ARRAY_BLOCK_SIZE];
+ System.arraycopy(sorted, 0, newSorted, 0, sorted.length);
+ sorted = newSorted;
+ }
+ sortedSize++;
+ sorted[idx] = addr;
+ }
+ }
+ } else {
+ int addrSize = Chunk.getSize(addr);
+ long highAddr = sorted[idx];
+ if (addr + addrSize == highAddr) {
+ // append highAddr chunk to addr
+ Chunk.setSize(addr, addrSize + Chunk.getSize(highAddr));
+ sorted[idx] = addr;
+ } else {
+ boolean insert = idx==0;
+ if (!insert) {
long lowAddr = sorted[idx-1];
+ // if (lowAddr == 0L) {
+ // long[] tmp = Arrays.copyOf(sorted, sortedSize);
+ // throw new IllegalStateException("addr was zero at idx=" + (idx-1) + " sorted="+ Arrays.toString(tmp));
+ // }
int lowSize = Chunk.getSize(lowAddr);
if (lowAddr + lowSize == addr) {
// append the addr chunk to lowAddr
- Chunk.setSize(lowAddr, lowSize + Chunk.getSize(addr));
+ Chunk.setSize(lowAddr, lowSize + addrSize);
} else {
- if (sortedSize >= sorted.length) {
- long[] newSorted = new long[sorted.length+SORT_ARRAY_BLOCK_SIZE];
- System.arraycopy(sorted, 0, newSorted, 0, sorted.length);
- sorted = newSorted;
- }
- sortedSize++;
- sorted[idx] = addr;
+ insert = true;
}
}
- } else {
- int addrSize = Chunk.getSize(addr);
- long highAddr = sorted[idx];
- if (addr + addrSize == highAddr) {
- // append highAddr chunk to addr
- Chunk.setSize(addr, addrSize + Chunk.getSize(highAddr));
- sorted[idx] = addr;
- } else {
- boolean insert = idx==0;
- if (!insert) {
- long lowAddr = sorted[idx-1];
- // if (lowAddr == 0L) {
- // long[] tmp = Arrays.copyOf(sorted, sortedSize);
- // throw new IllegalStateException("addr was zero at idx=" + (idx-1) + " sorted="+ Arrays.toString(tmp));
- // }
- int lowSize = Chunk.getSize(lowAddr);
- if (lowAddr + lowSize == addr) {
- // append the addr chunk to lowAddr
- Chunk.setSize(lowAddr, lowSize + addrSize);
- } else {
- insert = true;
- }
- }
- if (insert) {
- if (sortedSize >= sorted.length) {
- long[] newSorted = new long[sorted.length+SORT_ARRAY_BLOCK_SIZE];
- System.arraycopy(sorted, 0, newSorted, 0, idx);
- newSorted[idx] = addr;
- System.arraycopy(sorted, idx, newSorted, idx+1, sortedSize-idx);
- sorted = newSorted;
- } else {
- System.arraycopy(sorted, idx, sorted, idx+1, sortedSize-idx);
- sorted[idx] = addr;
- }
- sortedSize++;
+ if (insert) {
+ if (sortedSize >= sorted.length) {
+ long[] newSorted = new long[sorted.length+SORT_ARRAY_BLOCK_SIZE];
+ System.arraycopy(sorted, 0, newSorted, 0, idx);
+ newSorted[idx] = addr;
+ System.arraycopy(sorted, idx, newSorted, idx+1, sortedSize-idx);
+ sorted = newSorted;
+ } else {
+ System.arraycopy(sorted, idx, sorted, idx+1, sortedSize-idx);
+ sorted[idx] = addr;
}
+ sortedSize++;
}
}
- addr = l.poll();
}
+ addr = l.poll();
}
- for (int i=sortedSize-1; i > 0; i--) {
- long addr = sorted[i];
- long lowAddr = sorted[i-1];
- int lowSize = Chunk.getSize(lowAddr);
- if (lowAddr + lowSize == addr) {
- // append addr chunk to lowAddr
- Chunk.setSize(lowAddr, lowSize + Chunk.getSize(addr));
- sorted[i] = 0L;
- }
- }
- this.lastFragmentAllocation.set(0);
- ArrayList<Fragment> tmp = new ArrayList<Fragment>();
- for (int i=sortedSize-1; i >= 0; i--) {
- long addr = sorted[i];
- if (addr == 0L) continue;
- int addrSize = Chunk.getSize(addr);
- Fragment f = new Fragment(addr, addrSize);
- if (addrSize >= chunkSize) {
- result = true;
- }
- if (addrSize > largestFragment) {
- largestFragment = addrSize;
- // TODO it might be better to sort them biggest first
- tmp.add(0, f);
- } else {
- tmp.add(f);
- }
+ }
+ for (int i=sortedSize-1; i > 0; i--) {
+ long addr = sorted[i];
+ long lowAddr = sorted[i-1];
+ int lowSize = Chunk.getSize(lowAddr);
+ if (lowAddr + lowSize == addr) {
+ // append addr chunk to lowAddr
+ Chunk.setSize(lowAddr, lowSize + Chunk.getSize(addr));
+ sorted[i] = 0L;
}
- this.fragmentList.addAll(tmp);
-
- // Reinitialize fragments with fill pattern data
- if(this.ma.validateMemoryWithFill) {
- fillFragments();
+ }
+ this.lastFragmentAllocation.set(0);
+ ArrayList<Fragment> tmp = new ArrayList<Fragment>();
+ for (int i=sortedSize-1; i >= 0; i--) {
+ long addr = sorted[i];
+ if (addr == 0L) continue;
+ int addrSize = Chunk.getSize(addr);
+ Fragment f = new Fragment(addr, addrSize);
+ if (addrSize >= chunkSize) {
+ result = true;
}
-
- // Signal any waiters that a compaction happened.
- this.compactCount.incrementAndGet();
-
- this.ma.getStats().setLargestFragment(largestFragment);
- this.ma.getStats().setFragments(tmp.size());
- updateFragmentation();
-
- return result;
- } // sync
- } finally {
- this.ma.getStats().endCompaction(startCompactionTime);
- }
- }
-
- private void updateFragmentation() {
- long freeSize = this.ma.getStats().getFreeMemory();
-
- // Calculate free space fragmentation only if there is free space available.
- if(freeSize > 0) {
- long largestFragment = this.ma.getStats().getLargestFragment();
- long numerator = freeSize - largestFragment;
-
- double percentage = (double) numerator / (double) freeSize;
- percentage *= 100d;
-
- int wholePercentage = (int) Math.rint(percentage);
- this.ma.getStats().setFragmentation(wholePercentage);
- } else {
- // No free space? Then we have no free space fragmentation.
- this.ma.getStats().setFragmentation(0);
- }
- }
-
- private void collectFreeChunks(List<SyncChunkStack> l) {
- collectFreeFragmentChunks(l);
- collectFreeHugeChunks(l);
- collectFreeTinyChunks(l);
- }
- private void collectFreeFragmentChunks(List<SyncChunkStack> l) {
- if (this.fragmentList.size() == 0) return;
- SyncChunkStack result = new SyncChunkStack();
- for (Fragment f: this.fragmentList) {
- int offset;
- int diff;
- do {
- offset = f.getFreeIndex();
- diff = f.getSize() - offset;
- } while (diff >= Chunk.MIN_CHUNK_SIZE && !f.allocate(offset, offset+diff));
- if (diff < Chunk.MIN_CHUNK_SIZE) {
- if (diff > 0) {
- SimpleMemoryAllocatorImpl.logger.debug("Lost memory of size {}", diff);
+ if (addrSize > largestFragment) {
+ largestFragment = addrSize;
+ // TODO it might be better to sort them biggest first
+ tmp.add(0, f);
+ } else {
+ tmp.add(f);
}
- // fragment is too small to turn into a chunk
- // TODO we need to make sure this never happens
- // by keeping sizes rounded. I think I did this
- // by introducing MIN_CHUNK_SIZE and by rounding
- // the size of huge allocations.
- continue;
}
- long chunkAddr = f.getMemoryAddress()+offset;
- Chunk.setSize(chunkAddr, diff);
- result.offer(chunkAddr);
- }
- // All the fragments have been turned in to chunks so now clear them
- // The compaction will create new fragments.
- this.fragmentList.clear();
- if (!result.isEmpty()) {
- l.add(result);
- }
+ this.fragmentList.addAll(tmp);
+
+ // Reinitialize fragments with fill pattern data
+ if(this.ma.validateMemoryWithFill) {
+ fillFragments();
+ }
+
+ // Signal any waiters that a compaction happened.
+ this.compactCount.incrementAndGet();
+
+ this.ma.getStats().setLargestFragment(largestFragment);
+ this.ma.getStats().setFragments(tmp.size());
+ updateFragmentation();
+
+ return result;
+ } // sync
+ } finally {
+ this.ma.getStats().endCompaction(startCompactionTime);
}
- private void collectFreeTinyChunks(List<SyncChunkStack> l) {
- for (int i=0; i < this.tinyFreeLists.length(); i++) {
- SyncChunkStack cl = this.tinyFreeLists.get(i);
- if (cl != null) {
- long head = cl.clear();
- if (head != 0L) {
- l.add(new SyncChunkStack(head));
- }
+ }
+
+ private void updateFragmentation() {
+ long freeSize = this.ma.getStats().getFreeMemory();
+
+ // Calculate free space fragmentation only if there is free space available.
+ if(freeSize > 0) {
+ long largestFragment = this.ma.getStats().getLargestFragment();
+ long numerator = freeSize - largestFragment;
+
+ double percentage = (double) numerator / (double) freeSize;
+ percentage *= 100d;
+
+ int wholePercentage = (int) Math.rint(percentage);
+ this.ma.getStats().setFragmentation(wholePercentage);
+ } else {
+ // No free space? Then we have no free space fragmentation.
+ this.ma.getStats().setFragmentation(0);
+ }
+ }
+
+ private void collectFreeChunks(List<SyncChunkStack> l) {
+ collectFreeFragmentChunks(l);
+ collectFreeHugeChunks(l);
+ collectFreeTinyChunks(l);
+ }
+ private void collectFreeFragmentChunks(List<SyncChunkStack> l) {
+ if (this.fragmentList.size() == 0) return;
+ SyncChunkStack result = new SyncChunkStack();
+ for (Fragment f: this.fragmentList) {
+ int offset;
+ int diff;
+ do {
+ offset = f.getFreeIndex();
+ diff = f.getSize() - offset;
+ } while (diff >= Chunk.MIN_CHUNK_SIZE && !f.allocate(offset, offset+diff));
+ if (diff < Chunk.MIN_CHUNK_SIZE) {
+ if (diff > 0) {
+ SimpleMemoryAllocatorImpl.logger.debug("Lost memory of size {}", diff);
}
+ // fragment is too small to turn into a chunk
+ // TODO we need to make sure this never happens
+ // by keeping sizes rounded. I think I did this
+ // by introducing MIN_CHUNK_SIZE and by rounding
+ // the size of huge allocations.
+ continue;
}
- }
- public void collectFreeHugeChunks(List<SyncChunkStack> l) {
- Chunk c = this.hugeChunkSet.pollFirst();
- SyncChunkStack result = null;
- while (c != null) {
- if (result == null) {
- result = new SyncChunkStack();
- l.add(result);
+ long chunkAddr = f.getMemoryAddress()+offset;
+ Chunk.setSize(chunkAddr, diff);
+ result.offer(chunkAddr);
+ }
+ // All the fragments have been turned in to chunks so now clear them
+ // The compaction will create new fragments.
+ this.fragmentList.clear();
+ if (!result.isEmpty()) {
+ l.add(result);
+ }
+ }
+ private void collectFreeTinyChunks(List<SyncChunkStack> l) {
+ for (int i=0; i < this.tinyFreeLists.length(); i++) {
+ SyncChunkStack cl = this.tinyFreeLists.get(i);
+ if (cl != null) {
+ long head = cl.clear();
+ if (head != 0L) {
+ l.add(new SyncChunkStack(head));
}
- result.offer(c.getMemoryAddress());
- c = this.hugeChunkSet.pollFirst();
}
}
-
- private Chunk allocateFromFragment(final int fragIdx, final int chunkSize, ChunkType chunkType) {
- if (fragIdx >= this.fragmentList.size()) return null;
- final Fragment fragment;
- try {
- fragment = this.fragmentList.get(fragIdx);
- } catch (IndexOutOfBoundsException ignore) {
- // A concurrent compaction can cause this.
- return null;
+ }
+ private void collectFreeHugeChunks(List<SyncChunkStack> l) {
+ Chunk c = this.hugeChunkSet.pollFirst();
+ SyncChunkStack result = null;
+ while (c != null) {
+ if (result == null) {
+ result = new SyncChunkStack();
+ l.add(result);
}
- boolean retryFragment;
- do {
- retryFragment = false;
- int oldOffset = fragment.getFreeIndex();
- int fragmentSize = fragment.getSize();
- int fragmentFreeSize = fragmentSize - oldOffset;
- if (fragmentFreeSize >= chunkSize) {
- // this fragment has room
- // Try to allocate up to BATCH_SIZE more chunks from it
- int allocSize = chunkSize * SimpleMemoryAllocatorImpl.BATCH_SIZE;
- if (allocSize > fragmentFreeSize) {
- allocSize = (fragmentFreeSize / chunkSize) * chunkSize;
- }
- int newOffset = oldOffset + allocSize;
- int extraSize = fragmentSize - newOffset;
- if (extraSize < Chunk.MIN_CHUNK_SIZE) {
- // include these last few bytes of the fragment in the allocation.
- // If we don't then they will be lost forever.
- // The extraSize bytes only apply to the first chunk we allocate (not the batch ones).
- newOffset += extraSize;
- } else {
- extraSize = 0;
- }
- if (fragment.allocate(oldOffset, newOffset)) {
- // We did the allocate!
- this.lastFragmentAllocation.set(fragIdx);
- Chunk result = this.ma.chunkFactory.newChunk(fragment.getMemoryAddress()+oldOffset, chunkSize+extraSize, chunkType);
- allocSize -= chunkSize+extraSize;
- oldOffset += extraSize;
- while (allocSize > 0) {
- oldOffset += chunkSize;
- // we add the batch ones immediately to the freelist
- result.readyForFree();
- free(result.getMemoryAddress(), false);
- result = this.ma.chunkFactory.newChunk(fragment.getMemoryAddress()+oldOffset, chunkSize, chunkType);
- allocSize -= chunkSize;
- }
-
- if(this.ma.validateMemoryWithFill) {
- result.validateFill();
- }
-
- return result;
- } else {
- // TODO OFFHEAP: if batch allocations are disabled should we not call basicAllocate here?
- // Since we know another thread did a concurrent alloc
- // that possibly did a batch check the free list again.
- Chunk result = basicAllocate(chunkSize, false, chunkType);
- if (result != null) {
- return result;
- }
- retryFragment = true;
- }
- }
- } while (retryFragment);
- return null; // did not find enough free space in this fragment
+ result.offer(c.getMemoryAddress());
+ c = this.hugeChunkSet.pollFirst();
}
+ }
- private int round(int multiple, int value) {
- return (int) ((((long)value + (multiple-1)) / multiple) * multiple);
- }
- private Chunk allocateTiny(int size, boolean useFragments, ChunkType chunkType) {
- return basicAllocate(getNearestTinyMultiple(size), SimpleMemoryAllocatorImpl.TINY_MULTIPLE, 0, this.tinyFreeLists, useFragments, chunkType);
+ private Chunk allocateFromFragment(final int fragIdx, final int chunkSize, ChunkType chunkType) {
+ if (fragIdx >= this.fragmentList.size()) return null;
+ final Fragment fragment;
+ try {
+ fragment = this.fragmentList.get(fragIdx);
+ } catch (IndexOutOfBoundsException ignore) {
+ // A concurrent compaction can cause this.
+ return null;
}
- private Chunk basicAllocate(int idx, int multiple, int offset, AtomicReferenceArray<SyncChunkStack> freeLists, boolean useFragments, ChunkType chunkType) {
- SyncChunkStack clq = freeLists.get(idx);
- if (clq != null) {
- long memAddr = clq.poll();
- if (memAddr != 0) {
- Chunk result = this.ma.chunkFactory.newChunk(memAddr, chunkType);
-
- // Data integrity check.
- if(this.ma.validateMemoryWithFill) {
- result.validateFill();
- }
-
- result.readyForAllocation(chunkType);
- return result;
+ boolean retryFragment;
+ do {
+ retryFragment = false;
+ int oldOffset = fragment.getFreeIndex();
+ int fragmentSize = fragment.getSize();
+ int fragmentFreeSize = fragmentSize - oldOffset;
+ if (fragmentFreeSize >= chunkSize) {
+ // this fragment has room
+ // Try to allocate up to BATCH_SIZE more chunks from it
+ int allocSize = chunkSize * SimpleMemoryAllocatorImpl.BATCH_SIZE;
+ if (allocSize > fragmentFreeSize) {
+ allocSize = (fragmentFreeSize / chunkSize) * chunkSize;
}
- }
- if (useFragments) {
- return allocateFromFragments(((idx+1)*multiple)+offset, chunkType);
- } else {
- return null;
- }
- }
- private Chunk allocateHuge(int size, boolean useFragments, ChunkType chunkType) {
- // sizeHolder is a fake Chunk used to search our sorted hugeChunkSet.
- Chunk sizeHolder = new FakeChunk(size);
- NavigableSet<Chunk> ts = this.hugeChunkSet.tailSet(sizeHolder);
- Chunk result = ts.pollFirst();
- if (result != null) {
- if (result.getSize() - (SimpleMemoryAllocatorImpl.HUGE_MULTIPLE - Chunk.OFF_HEAP_HEADER_SIZE) < size) {
- // close enough to the requested size; just return it.
-
- // Data integrity check.
- if(this.ma.validateMemoryWithFill) {
- result.validateFill();
+ int newOffset = oldOffset + allocSize;
+ int extraSize = fragmentSize - newOffset;
+ if (extraSize < Chunk.MIN_CHUNK_SIZE) {
+ // include these last few bytes of the fragment in the allocation.
+ // If we don't then they will be lost forever.
+ // The extraSize bytes only apply to the first chunk we allocate (not the batch ones).
+ newOffset += extraSize;
+ } else {
+ extraSize = 0;
+ }
+ if (fragment.allocate(oldOffset, newOffset)) {
+ // We did the allocate!
+ this.lastFragmentAllocation.set(fragIdx);
+ Chunk result = this.ma.chunkFactory.newChunk(fragment.getMemoryAddress()+oldOffset, chunkSize+extraSize, chunkType);
+ allocSize -= chunkSize+extraSize;
+ oldOffset += extraSize;
+ while (allocSize > 0) {
+ oldOffset += chunkSize;
+ // we add the batch ones immediately to the freelist
+ result.readyForFree();
+ free(result.getMemoryAddress(), false);
+ result = this.ma.chunkFactory.newChunk(fragment.getMemoryAddress()+oldOffset, chunkSize, chunkType);
+ allocSize -= chunkSize;
}
- if (chunkType.getSrcType() != Chunk.getSrcType(result.getMemoryAddress())) {
- // The java wrapper class that was cached in the huge chunk list is the wrong type.
- // So allocate a new one and garbage collect the old one.
- result = this.ma.chunkFactory.newChunk(result.getMemoryAddress(), chunkType);
+
+ if(this.ma.validateMemoryWithFill) {
+ result.validateFill();
}
- result.readyForAllocation(chunkType);
+
return result;
} else {
- this.hugeChunkSet.add(result);
+ // TODO OFFHEAP: if batch allocations are disabled should we not call basicAllocate here?
+ // Since we know another thread did a concurrent alloc
+ // that possibly did a batch check the free list again.
+ Chunk result = basicAllocate(chunkSize, false, chunkType);
+ if (result != null) {
+ return result;
+ }
+ retryFragment = true;
}
}
- if (useFragments) {
- // We round it up to the next multiple of TINY_MULTIPLE to make
- // sure we always have chunks allocated on an 8 byte boundary.
- return allocateFromFragments(round(SimpleMemoryAllocatorImpl.TINY_MULTIPLE, size), chunkType);
- } else {
- return null;
+ } while (retryFragment);
+ return null; // did not find enough free space in this fragment
+ }
+
+ private int round(int multiple, int value) {
+ return (int) ((((long)value + (multiple-1)) / multiple) * multiple);
+ }
+ private Chunk allocateTiny(int size, boolean useFragments, ChunkType chunkType) {
+ return basicAllocate(getNearestTinyMultiple(size), SimpleMemoryAllocatorImpl.TINY_MULTIPLE, 0, this.tinyFreeLists, useFragments, chunkType);
+ }
+ private Chunk basicAllocate(int idx, int multiple, int offset, AtomicReferenceArray<SyncChunkStack> freeLists, boolean useFragments, ChunkType chunkType) {
+ SyncChunkStack clq = freeLists.get(idx);
+ if (clq != null) {
+ long memAddr = clq.poll();
+ if (memAddr != 0) {
+ Chunk result = this.ma.chunkFactory.newChunk(memAddr, chunkType);
+
+ // Data integrity check.
+ if(this.ma.validateMemoryWithFill) {
+ result.validateFill();
+ }
+
+ result.readyForAllocation(chunkType);
+ return result;
}
}
-
- @SuppressWarnings("synthetic-access")
- public void free(long addr) {
- free(addr, true);
- }
-
- private void free(long addr, boolean updateStats) {
- int cSize = Chunk.getSize(addr);
- if (updateStats) {
- this.ma.stats.incObjects(-1);
- this.allocatedSize.addAndGet(-cSize);
- this.ma.stats.incUsedMemory(-cSize);
- this.ma.stats.incFreeMemory(cSize);
- this.ma.notifyListeners();
- }
- if (cSize <= SimpleMemoryAllocatorImpl.MAX_TINY) {
- freeTiny(addr, cSize);
+ if (useFragments) {
+ return allocateFromFragments(((idx+1)*multiple)+offset, chunkType);
+ } else {
+ return null;
+ }
+ }
+ private Chunk allocateHuge(int size, boolean useFragments, ChunkType chunkType) {
+ // sizeHolder is a fake Chunk used to search our sorted hugeChunkSet.
+ Chunk sizeHolder = new FakeChunk(size);
+ NavigableSet<Chunk> ts = this.hugeChunkSet.tailSet(sizeHolder);
+ Chunk result = ts.pollFirst();
+ if (result != null) {
+ if (result.getSize() - (SimpleMemoryAllocatorImpl.HUGE_MULTIPLE - Chunk.OFF_HEAP_HEADER_SIZE) < size) {
+ // close enough to the requested size; just return it.
+
+ // Data integrity check.
+ if(this.ma.validateMemoryWithFill) {
+ result.validateFill();
+ }
+ if (chunkType.getSrcType() != Chunk.getSrcType(result.getMemoryAddress())) {
+ // The java wrapper class that was cached in the huge chunk list is the wrong type.
+ // So allocate a new one and garbage collect the old one.
+ result = this.ma.chunkFactory.newChunk(result.getMemoryAddress(), chunkType);
+ }
+ result.readyForAllocation(chunkType);
+ return result;
} else {
- freeHuge(addr, cSize);
+ this.hugeChunkSet.add(result);
}
}
- private void freeTiny(long addr, int cSize) {
- basicFree(addr, getNearestTinyMultiple(cSize), this.tinyFreeLists);
+ if (useFragments) {
+ // We round it up to the next multiple of TINY_MULTIPLE to make
+ // sure we always have chunks allocated on an 8 byte boundary.
+ return allocateFromFragments(round(SimpleMemoryAllocatorImpl.TINY_MULTIPLE, size), chunkType);
+ } else {
+ return null;
+ }
+ }
+
+ @SuppressWarnings("synthetic-access")
+ public void free(long addr) {
+ free(addr, true);
+ }
+
+ private void free(long addr, boolean updateStats) {
+ int cSize = Chunk.getSize(addr);
+ if (updateStats) {
+ this.ma.stats.incObjects(-1);
+ this.allocatedSize.addAndGet(-cSize);
+ this.ma.stats.incUsedMemory(-cSize);
+ this.ma.stats.incFreeMemory(cSize);
+ this.ma.notifyListeners();
}
- private void basicFree(long addr, int idx, AtomicReferenceArray<SyncChunkStack> freeLists) {
- SyncChunkStack clq = freeLists.get(idx);
- if (clq != null) {
- clq.offer(addr);
- } else {
- clq = new SyncChunkStack();
+ if (cSize <= SimpleMemoryAllocatorImpl.MAX_TINY) {
+ freeTiny(addr, cSize);
+ } else {
+ freeHuge(addr, cSize);
+ }
+ }
+ private void freeTiny(long addr, int cSize) {
+ basicFree(addr, getNearestTinyMultiple(cSize), this.tinyFreeLists);
+ }
+ private void basicFree(long addr, int idx, AtomicReferenceArray<SyncChunkStack> freeLists) {
+ SyncChunkStack clq = freeLists.get(idx);
+ if (clq != null) {
+ clq.offer(addr);
+ } else {
+ clq = new SyncChunkStack();
+ clq.offer(addr);
+ if (!freeLists.compareAndSet(idx, null, clq)) {
+ clq = freeLists.get(idx);
clq.offer(addr);
- if (!freeLists.compareAndSet(idx, null, clq)) {
- clq = freeLists.get(idx);
- clq.offer(addr);
- }
}
-
}
- private void freeHuge(long addr, int cSize) {
- this.hugeChunkSet.add(this.ma.chunkFactory.newChunk(addr)); // TODO make this a collection of longs
+
+ }
+ private void freeHuge(long addr, int cSize) {
+ this.hugeChunkSet.add(this.ma.chunkFactory.newChunk(addr)); // TODO make this a collection of longs
+ }
+
+ List<MemoryBlock> getOrderedBlocks() {
+ final List<MemoryBlock> value = new ArrayList<MemoryBlock>();
+ addBlocksFromFragments(this.fragmentList, value); // unused fragments
+ addBlocksFromChunks(getLiveChunks(), value); // used chunks
+ addBlocksFromChunks(this.hugeChunkSet, value); // huge free chunks
+ addMemoryBlocks(getTinyFreeBlocks(), value); // tiny free chunks
+ Collections.sort(value,
+ new Comparator<MemoryBlock>() {
+ @Override
+ public int compare(MemoryBlock o1, MemoryBlock o2) {
+ return Long.valueOf(o1.getMemoryAddress()).compareTo(o2.getMemoryAddress());
+ }
+ });
+ return value;
+ }
+ private void addBlocksFromFragments(Collection<Fragment> src, List<MemoryBlock> dest) {
+ for (MemoryBlock block : src) {
+ dest.add(new MemoryBlockNode(this.ma, block));
+ }
+ }
+
+ private void addBlocksFromChunks(Collection<Chunk> src, List<MemoryBlock> dest) {
+ for (Chunk chunk : src) {
+ dest.add(new MemoryBlockNode(this.ma, chunk));
+ }
+ }
+
+ private void addMemoryBlocks(Collection<MemoryBlock> src, List<MemoryBlock> dest) {
+ for (MemoryBlock block : src) {
+ dest.add(new MemoryBlockNode(this.ma, block));
+ }
+ }
+
+ private List<MemoryBlock> getTinyFreeBlocks() {
+ final List<MemoryBlock> value = new ArrayList<MemoryBlock>();
+ final SimpleMemoryAllocatorImpl sma = this.ma;
+ for (int i = 0; i < this.tinyFreeLists.length(); i++) {
+ if (this.tinyFreeLists.get(i) == null) continue;
+ long addr = this.tinyFreeLists.get(i).getTopAddress();
+ while (addr != 0L) {
+ value.add(new MemoryBlockNode(sma, new TinyMemoryBlock(addr, i)));
+ addr = Chunk.getNext(addr);
+ }
+ }
+ return value;
+ }
+ List<MemoryBlock> getAllocatedBlocks() {
+ final List<MemoryBlock> value = new ArrayList<MemoryBlock>();
+ addBlocksFromChunks(getLiveChunks(), value); // used chunks
+ Collections.sort(value,
+ new Comparator<MemoryBlock>() {
+ @Override
+ public int compare(MemoryBlock o1, MemoryBlock o2) {
+ return Long.valueOf(o1.getMemoryAddress()).compareTo(o2.getMemoryAddress());
+ }
+ });
+ return value;
+ }
+ /**
+ * Used to represent an address from a tiny free list as a MemoryBlock
+ */
+ private static final class TinyMemoryBlock implements MemoryBlock {
+ private final long address;
+ private final int freeListId;
+
+ private TinyMemoryBlock(long address, int freeListId) {
+ this.address = address;
+ this.freeListId = freeListId;
+ }
+
+ @Override
+ public State getState() {
+ return State.DEALLOCATED;
+ }
+
+ @Override
+ public long getMemoryAddress() {
+ return address;
+ }
+
+ @Override
+ public int getBlockSize() {
+ return Chunk.getSize(address);
+ }
+
+ @Override
+ public MemoryBlock getNextBlock() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int getSlabId() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int getFreeListId() {
+ return freeListId;
+ }
+
+ @Override
+ public int getRefCount() {
+ return 0;
+ }
+
+ @Override
+ public String getDataType() {
+ return "N/A";
+ }
+
+ @Override
+ public boolean isSerialized() {
+ return false;
+ }
+
+ @Override
+ public boolean isCompressed() {
+ return false;
+ }
+
+ @Override
+ public Object getDataValue() {
+ return null;
+ }
+
+ @Override
+ public ChunkType getChunkType() {
+ return null;
}
- }
\ No newline at end of file
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8e7fabf0/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
index eacfa06..e94bf4d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/SimpleMemoryAllocatorImpl.java
@@ -18,7 +18,6 @@ package com.gemstone.gemfire.internal.offheap;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
@@ -27,8 +26,6 @@ import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.atomic.AtomicReferenceArray;
-
import org.apache.logging.log4j.Logger;
import com.gemstone.gemfire.LogWriter;
@@ -617,12 +614,6 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
}
}
- private void printSlabs() {
- for (int i =0; i < this.slabs.length; i++) {
- logger.info(slabs[i]);
- }
- }
-
/** The inspection snapshot for MemoryInspector */
private List<MemoryBlock> memoryBlocks;
@@ -691,16 +682,7 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
@Override
public List<MemoryBlock> getAllocatedBlocks() {
- final List<MemoryBlock> value = new ArrayList<MemoryBlock>();
- addBlocksFromChunks(this.freeList.getLiveChunks(), value); // used chunks
- Collections.sort(value,
- new Comparator<MemoryBlock>() {
- @Override
- public int compare(MemoryBlock o1, MemoryBlock o2) {
- return Long.valueOf(o1.getMemoryAddress()).compareTo(o2.getMemoryAddress());
- }
- });
- return value;
+ return this.freeList.getAllocatedBlocks();
}
@Override
@@ -733,103 +715,7 @@ public final class SimpleMemoryAllocatorImpl implements MemoryAllocator, MemoryI
}
private List<MemoryBlock> getOrderedBlocks() {
- final List<MemoryBlock> value = new ArrayList<MemoryBlock>();
- addBlocksFromFragments(this.freeList.fragmentList, value); // unused fragments
- addBlocksFromChunks(this.freeList.getLiveChunks(), value); // used chunks
- addBlocksFromChunks(this.freeList.hugeChunkSet, value); // huge free chunks
- addMemoryBlocks(getTinyFreeBlocks(), value); // tiny free chunks
- Collections.sort(value,
- new Comparator<MemoryBlock>() {
- @Override
- public int compare(MemoryBlock o1, MemoryBlock o2) {
- return Long.valueOf(o1.getMemoryAddress()).compareTo(o2.getMemoryAddress());
- }
- });
- return value;
- }
-
- private void addBlocksFromFragments(Collection<Fragment> src, List<MemoryBlock> dest) {
- for (MemoryBlock block : src) {
- dest.add(new MemoryBlockNode(this, block));
- }
- }
-
- private void addBlocksFromChunks(Collection<Chunk> src, List<MemoryBlock> dest) {
- for (Chunk chunk : src) {
- dest.add(new MemoryBlockNode(this, chunk));
- }
- }
-
- private void addMemoryBlocks(Collection<MemoryBlock> src, List<MemoryBlock> dest) {
- for (MemoryBlock block : src) {
- dest.add(new MemoryBlockNode(this, block));
- }
- }
-
- private List<MemoryBlock> getTinyFreeBlocks() {
- List<MemoryBlock> value = new ArrayList<MemoryBlock>();
- AtomicReferenceArray<SyncChunkStack> chunkStacks = this.freeList.tinyFreeLists;
- for (int i = 0; i < chunkStacks.length(); i++) {
- if (chunkStacks.get(i) == null) continue;
- long addr = chunkStacks.get(i).getTopAddress();
- final int size = Chunk.getSize(addr);
- final long address = addr;
- final int freeListId = i;
- while (addr != 0L) {
- value.add(new MemoryBlockNode(this, new MemoryBlock() {
- @Override
- public State getState() {
- return State.DEALLOCATED;
- }
- @Override
- public long getMemoryAddress() {
- return address;
- }
- @Override
- public int getBlockSize() {
- return size;
- }
- @Override
- public MemoryBlock getNextBlock() {
- throw new UnsupportedOperationException();
- }
- @Override
- public int getSlabId() {
- throw new UnsupportedOperationException();
- }
- @Override
- public int getFreeListId() {
- return freeListId;
- }
- @Override
- public int getRefCount() {
- return 0;
- }
- @Override
- public String getDataType() {
- return "N/A";
- }
- @Override
- public boolean isSerialized() {
- return false;
- }
- @Override
- public boolean isCompressed() {
- return false;
- }
- @Override
- public Object getDataValue() {
- return null;
- }
- @Override
- public ChunkType getChunkType() {
- return null;
- }
- }));
- addr = Chunk.getNext(addr);
- }
- }
- return value;
+ return this.freeList.getOrderedBlocks();
}
/*
[2/5] incubator-geode git commit: removed more "big" code from
FreeListManager
Posted by ds...@apache.org.
removed more "big" code from FreeListManager
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/3c998ad1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/3c998ad1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/3c998ad1
Branch: refs/heads/feature/GEODE-580
Commit: 3c998ad11c9040843d3bc9481bb9702ea52daf4e
Parents: 5ef5150
Author: Darrel Schneider <ds...@pivotal.io>
Authored: Mon Nov 23 15:00:27 2015 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Mon Nov 23 15:00:27 2015 -0800
----------------------------------------------------------------------
.../internal/offheap/FreeListManager.java | 24 --------------------
1 file changed, 24 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/3c998ad1/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
index f335b4b..fc0f950 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/offheap/FreeListManager.java
@@ -226,7 +226,6 @@ public class FreeListManager {
lw.info("OutOfOffHeapMemory allocating size of " + chunkSize + ". allocated=" + this.allocatedSize.get() + " compactions=" + this.compactCount.get() + " objects=" + this.ma.stats.getObjects() + " free=" + this.ma.stats.getFreeMemory() + " fragments=" + this.ma.stats.getFragments() + " largestFragment=" + this.ma.stats.getLargestFragment() + " fragmentation=" + this.ma.stats.getFragmentation());
logFragmentState(lw);
logTinyState(lw);
-// logBigState(lw);
logHugeState(lw);
}
}
@@ -236,14 +235,6 @@ public class FreeListManager {
lw.info("Free huge of size " + c.getSize());
}
}
-// private void logBigState(LogWriter lw) {
-// for (int i=0; i < this.bigFreeLists.length(); i++) {
-// ConcurrentChunkStack cl = this.bigFreeLists.get(i);
-// if (cl != null) {
-// cl.logSizes(lw, "Free big of size ");
-// }
-// }
-// }
private void logTinyState(LogWriter lw) {
for (int i=0; i < this.tinyFreeLists.length(); i++) {
SyncChunkStack cl = this.tinyFreeLists.get(i);
@@ -435,7 +426,6 @@ public class FreeListManager {
private void collectFreeChunks(List<SyncChunkStack> l) {
collectFreeFragmentChunks(l);
collectFreeHugeChunks(l);
-// collectFreeBigChunks(l);
collectFreeTinyChunks(l);
}
private void collectFreeFragmentChunks(List<SyncChunkStack> l) {
@@ -481,17 +471,6 @@ public class FreeListManager {
}
}
}
-// private void collectFreeBigChunks(List<ConcurrentChunkStack> l) {
-// for (int i=0; i < this.bigFreeLists.length(); i++) {
-// ConcurrentChunkStack cl = this.bigFreeLists.get(i);
-// if (cl != null) {
-// long head = cl.clear();
-// if (head != 0L) {
-// l.add(new ConcurrentChunkStack(head));
-// }
-// }
-// }
-// }
public void collectFreeHugeChunks(List<SyncChunkStack> l) {
Chunk c = this.hugeChunkSet.pollFirst();
SyncChunkStack result = null;
@@ -578,9 +557,6 @@ public class FreeListManager {
private Chunk allocateTiny(int size, boolean useFragments, ChunkType chunkType) {
return basicAllocate(getNearestTinyMultiple(size), SimpleMemoryAllocatorImpl.TINY_MULTIPLE, 0, this.tinyFreeLists, useFragments, chunkType);
}
-// private Chunk allocateBig(int size, boolean useFragments) {
-// return basicAllocate(getNearestBigMultiple(size), BIG_MULTIPLE, BIG_OFFSET, this.bigFreeLists, useFragments);
-// }
private Chunk basicAllocate(int idx, int multiple, int offset, AtomicReferenceArray<SyncChunkStack> freeLists, boolean useFragments, ChunkType chunkType) {
SyncChunkStack clq = freeLists.get(idx);
if (clq != null) {