Posted to commits@lucene.apache.org by sh...@apache.org on 2016/11/15 05:31:02 UTC

lucene-solr:branch_6x: SOLR-9366: Limit memory consumed by FastLRUCache with a new 'maxRamMB' config parameter

Repository: lucene-solr
Updated Branches:
  refs/heads/branch_6x 8989c7747 -> 53dad4f0d


SOLR-9366: Limit memory consumed by FastLRUCache with a new 'maxRamMB' config parameter

(cherry picked from commit 487b097)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/53dad4f0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/53dad4f0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/53dad4f0

Branch: refs/heads/branch_6x
Commit: 53dad4f0d1c8f563810626ef9ab470fb3e5d1da9
Parents: 8989c77
Author: Shalin Shekhar Mangar <sh...@apache.org>
Authored: Tue Nov 15 10:59:58 2016 +0530
Committer: Shalin Shekhar Mangar <sh...@apache.org>
Committed: Tue Nov 15 11:00:52 2016 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../org/apache/solr/search/FastLRUCache.java    |  28 +-
 .../java/org/apache/solr/search/LRUCache.java   |   4 +-
 .../apache/solr/util/ConcurrentLRUCache.java    | 427 ++++++++++++-------
 .../basic_configs/conf/solrconfig.xml           |   3 +
 .../conf/solrconfig.xml                         |   3 +
 .../conf/solrconfig.xml                         |   5 +-
 7 files changed, 313 insertions(+), 160 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53dad4f0/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 26beef7..c28db06 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -60,6 +60,9 @@ New Features
 
 * SOLR-9038: Add a command-line tool to manage the snapshots functionality (Hrishikesh Gadre via yonik)
 
+* SOLR-9366: Limit memory consumed by FastLRUCache with a new 'maxRamMB' config parameter.
+  (yonik, Michael Sun, shalin)
+
 Optimizations
 ----------------------
 * SOLR-9704: Facet Module / JSON Facet API: Optimize blockChildren facets that have

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53dad4f0/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
index 2ae752e..6c2e4d5 100644
--- a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
+++ b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java
@@ -43,7 +43,7 @@ import java.util.concurrent.TimeUnit;
  * @see org.apache.solr.search.SolrCache
  * @since solr 1.4
  */
-public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
+public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V> {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   // contains the statistics objects for all open caches of the same type
@@ -55,6 +55,8 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
   private ConcurrentLRUCache<K,V> cache;
   private int showItems = 0;
 
+  private long maxRamBytes;
+
   @Override
   public Object init(Map args, Object persistence, CacheRegenerator regenerator) {
     super.init(args, regenerator);
@@ -87,8 +89,18 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
 
     str = (String) args.get("showItems");
     showItems = str == null ? 0 : Integer.parseInt(str);
-    description = generateDescription(limit, initialSize, minLimit, acceptableLimit, newThread);
-    cache = new ConcurrentLRUCache<>(limit, minLimit, acceptableLimit, initialSize, newThread, false, null);
+
+    str = (String) args.get("maxRamMB");
+    this.maxRamBytes = str == null ? Long.MAX_VALUE : (long) (Double.parseDouble(str) * 1024L * 1024L);
+    if (maxRamBytes != Long.MAX_VALUE)  {
+      long ramLowerWatermark = (long) (maxRamBytes * 0.8);
+      description = generateDescription(maxRamBytes, ramLowerWatermark, newThread);
+      cache = new ConcurrentLRUCache<K, V>(ramLowerWatermark, maxRamBytes, newThread, null);
+    } else  {
+      description = generateDescription(limit, initialSize, minLimit, acceptableLimit, newThread);
+      cache = new ConcurrentLRUCache<>(limit, minLimit, acceptableLimit, initialSize, newThread, false, null);
+    }
+
     cache.setAlive(false);
 
     statsList = (List<ConcurrentLRUCache.Stats>) persistence;
@@ -118,6 +130,16 @@ public class FastLRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V> {
     return description;
   }
 
+  protected String generateDescription(long maxRamBytes, long ramLowerWatermark, boolean newThread) {
+    String description = "Concurrent LRU Cache(ramMinSize=" + ramLowerWatermark + ", ramMaxSize=" + maxRamBytes
+        + ", cleanupThread=" + newThread;
+    if (isAutowarmingOn()) {
+      description += ", " + getAutowarmDescription();
+    }
+    description += ')';
+    return description;
+  }
+
   @Override
   public int size() {
     return cache.size();

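A quick illustration of the watermark math in FastLRUCache.init() above. This is a
standalone sketch, not Solr API; the value "64" is an assumed example for maxRamMB:

    public class MaxRamMathSketch {
      public static void main(String[] args) {
        String str = "64"; // assumed value of the 'maxRamMB' init argument
        long maxRamBytes = str == null ? Long.MAX_VALUE
            : (long) (Double.parseDouble(str) * 1024L * 1024L); // 67,108,864 bytes
        long ramLowerWatermark = (long) (maxRamBytes * 0.8);    // 53,687,091 bytes
        // Eviction starts once the estimated footprint exceeds maxRamBytes and
        // the sweep stops when usage falls back to 80% of the cap.
        System.out.println(maxRamBytes + " -> sweep down to " + ramLowerWatermark);
      }
    }
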
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53dad4f0/solr/core/src/java/org/apache/solr/search/LRUCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/LRUCache.java b/solr/core/src/java/org/apache/solr/search/LRUCache.java
index 0d9f406..b178fb2 100644
--- a/solr/core/src/java/org/apache/solr/search/LRUCache.java
+++ b/solr/core/src/java/org/apache/solr/search/LRUCache.java
@@ -46,9 +46,9 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Acco
   ///  Copied from Lucene's LRUQueryCache
 
   // memory usage of a simple term query
-  static final long DEFAULT_RAM_BYTES_USED = 192;
+  public static final long DEFAULT_RAM_BYTES_USED = 192;
 
-  static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
+  public static final long HASHTABLE_RAM_BYTES_PER_ENTRY =
       2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF // key + value
           * 2; // hash tables need to be oversized to avoid collisions, assume 2x capacity
 

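For a sense of scale of the constants above, here is a worked example, assuming a
64-bit JVM with compressed oops so that RamUsageEstimator.NUM_BYTES_OBJECT_REF is 4.
The class and variable names are illustrative only:

    import org.apache.lucene.util.RamUsageEstimator;

    public class EntryOverheadSketch {
      public static void main(String[] args) {
        // 2 refs (key + value) * bytes per ref * 2x hash-table oversizing
        // = 2 * 4 * 2 = 16 bytes of table overhead per entry.
        long tableOverhead = 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF * 2;
        // A key or value that does not implement Accountable is charged the flat
        // DEFAULT_RAM_BYTES_USED (192 bytes, a simple term query), so an opaque
        // entry costs roughly this much, plus the CacheEntry shallow size:
        long opaqueEntry = tableOverhead + 192 + 192; // = 400 bytes
        System.out.println(opaqueEntry);
      }
    }
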
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53dad4f0/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java b/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java
index be14437..e875828 100644
--- a/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java
+++ b/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java
@@ -15,14 +15,20 @@
  * limitations under the License.
  */
 package org.apache.solr.util;
+import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.solr.common.util.Cache;
+import org.apache.solr.search.LRUCache;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
@@ -45,9 +51,11 @@ import java.lang.ref.WeakReference;
  *
  * @since solr 1.4
  */
-public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
+public class ConcurrentLRUCache<K,V> implements Cache<K,V>, Accountable {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConcurrentLRUCache.class);
+
   private final ConcurrentHashMap<Object, CacheEntry<K,V>> map;
   private final int upperWaterMark, lowerWaterMark;
   private final ReentrantLock markAndSweepLock = new ReentrantLock(true);
@@ -58,7 +66,29 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
   private final int acceptableWaterMark;
   private long oldestEntry = 0;  // not volatile, only accessed in the cleaning method
   private final EvictionListener<K,V> evictionListener;
-  private CleanupThread cleanupThread ;
+  private CleanupThread cleanupThread;
+
+  private final long ramLowerWatermark, ramUpperWatermark;
+  private final AtomicLong ramBytes = new AtomicLong(0);
+
+  public ConcurrentLRUCache(long ramLowerWatermark, long ramUpperWatermark,
+                            boolean runCleanupThread, EvictionListener<K, V> evictionListener) {
+    this.ramLowerWatermark = ramLowerWatermark;
+    this.ramUpperWatermark = ramUpperWatermark;
+
+    this.evictionListener = evictionListener;
+    this.map = new ConcurrentHashMap<>();
+    this.newThreadForCleanup = false;
+
+    this.acceptableWaterMark = -1;
+    this.lowerWaterMark = Integer.MIN_VALUE;
+    this.upperWaterMark = Integer.MAX_VALUE;
+
+    if (runCleanupThread) {
+      cleanupThread = new CleanupThread(this);
+      cleanupThread.start();
+    }
+  }
 
   public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark, int acceptableWatermark,
                             int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup,
@@ -76,6 +106,8 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
       cleanupThread = new CleanupThread(this);
       cleanupThread.start();
     }
+    this.ramLowerWatermark = Long.MIN_VALUE;
+    this.ramUpperWatermark = Long.MAX_VALUE;
   }
 
   public ConcurrentLRUCache(int size, int lowerWatermark) {
@@ -103,6 +135,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
     CacheEntry<K,V> cacheEntry = map.remove(key);
     if (cacheEntry != null) {
       stats.size.decrementAndGet();
+      if (ramUpperWatermark != Long.MAX_VALUE)  {
+        ramBytes.addAndGet(-cacheEntry.ramBytesUsed() - LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY);
+      }
       return cacheEntry.value;
     }
     return null;
@@ -116,8 +151,23 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
     int currentSize;
     if (oldCacheEntry == null) {
       currentSize = stats.size.incrementAndGet();
+      if (ramUpperWatermark != Long.MAX_VALUE)  {
+        ramBytes.addAndGet(e.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY); // added key + value + entry
+      }
     } else {
       currentSize = stats.size.get();
+      if (ramUpperWatermark != Long.MAX_VALUE)  {
+        if (oldCacheEntry.value instanceof Accountable) {
+          ramBytes.addAndGet(-((Accountable)oldCacheEntry.value).ramBytesUsed());
+        } else  {
+          ramBytes.addAndGet(-LRUCache.DEFAULT_RAM_BYTES_USED);
+        }
+        if (val instanceof Accountable) {
+          ramBytes.addAndGet(((Accountable)val).ramBytesUsed());
+        } else  {
+          ramBytes.addAndGet(LRUCache.DEFAULT_RAM_BYTES_USED);
+        }
+      }
     }
     if (islive) {
       stats.putCounter.increment();
@@ -135,7 +185,7 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
     //
     // Thread safety note: isCleaning read is piggybacked (comes after) other volatile reads
     // in this method.
-    if (currentSize > upperWaterMark && !isCleaning) {
+    if ((currentSize > upperWaterMark || ramBytes.get() > ramUpperWatermark) && !isCleaning) {
       if (newThreadForCleanup) {
         new Thread(this::markAndSweep).start();
       } else if (cleanupThread != null){
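
The accounting in put() above is only precise for keys and values that implement
Lucene's Accountable; everything else is charged the flat default. A hypothetical
value type (not part of this patch) that opts in could look like:

    import java.util.Collection;
    import java.util.Collections;
    import org.apache.lucene.util.Accountable;
    import org.apache.lucene.util.RamUsageEstimator;

    // Hypothetical cache value: put() above adds/subtracts ramBytesUsed()
    // for it instead of LRUCache.DEFAULT_RAM_BYTES_USED.
    class MeasuredValue implements Accountable {
      private final long[] docBits;

      MeasuredValue(long[] docBits) { this.docBits = docBits; }

      @Override
      public long ramBytesUsed() {
        return RamUsageEstimator.shallowSizeOf(this) + RamUsageEstimator.sizeOf(docBits);
      }

      @Override
      public Collection<Accountable> getChildResources() {
        return Collections.emptyList();
      }
    }
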
@@ -169,187 +219,223 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
 
     if (!markAndSweepLock.tryLock()) return;
     try {
-      long oldestEntry = this.oldestEntry;
-      isCleaning = true;
-      this.oldestEntry = oldestEntry;     // volatile write to make isCleaning visible
-
-      long timeCurrent = stats.accessCounter.longValue();
-      int sz = stats.size.get();
+      if (upperWaterMark != Integer.MAX_VALUE) {
+        markAndSweepByCacheSize();
+      } else if (ramUpperWatermark != Long.MAX_VALUE) {
+        markAndSweepByRamSize();
+      } else  {
+        // should never happen
+        throw new AssertionError("ConcurrentLRUCache initialized with neither size limits nor ram limits");
+      }
+    } finally {
+      isCleaning = false;  // set before markAndSweepLock.unlock() for visibility
+      markAndSweepLock.unlock();
+    }
+  }
 
-      int numRemoved = 0;
-      int numKept = 0;
-      long newestEntry = timeCurrent;
-      long newNewestEntry = -1;
-      long newOldestEntry = Long.MAX_VALUE;
+  /*
+    Must be called after acquiring markAndSweepLock
+   */
+  private void markAndSweepByRamSize() {
+    List<CacheEntry<K, V>> entriesInAccessOrder = new ArrayList<>(map.size());
+    map.forEach((o, kvCacheEntry) -> {
+      kvCacheEntry.lastAccessedCopy = kvCacheEntry.lastAccessed; // important because we want to avoid volatile read during comparisons
+      entriesInAccessOrder.add(kvCacheEntry);
+    });
+
+    Collections.sort(entriesInAccessOrder); // newer access is smaller, older access is bigger
+
+    // iterate in oldest to newest order
+    for (int i = entriesInAccessOrder.size() - 1; i >= 0; i--) {
+      CacheEntry<K, V> kvCacheEntry = entriesInAccessOrder.get(i);
+      evictEntry(kvCacheEntry.key);
+      ramBytes.addAndGet(-(kvCacheEntry.ramBytesUsed() + LRUCache.HASHTABLE_RAM_BYTES_PER_ENTRY));
+      if (ramBytes.get() <= ramLowerWatermark)  {
+        break; // we are done!
+      }
+    }
+  }
 
-      int wantToKeep = lowerWaterMark;
-      int wantToRemove = sz - lowerWaterMark;
+  /*
+    Must be called after acquiring markAndSweepLock
+   */
+  private void markAndSweepByCacheSize() {
+    long oldestEntry = this.oldestEntry;
+    isCleaning = true;
+    this.oldestEntry = oldestEntry;     // volatile write to make isCleaning visible
+
+    long timeCurrent = stats.accessCounter.longValue();
+    int sz = stats.size.get();
+
+    int numRemoved = 0;
+    int numKept = 0;
+    long newestEntry = timeCurrent;
+    long newNewestEntry = -1;
+    long newOldestEntry = Long.MAX_VALUE;
+
+    int wantToKeep = lowerWaterMark;
+    int wantToRemove = sz - lowerWaterMark;
+
+    @SuppressWarnings("unchecked") // generic arrays are annoying
+    CacheEntry<K,V>[] eset = new CacheEntry[sz];
+    int eSize = 0;
+
+    // System.out.println("newestEntry="+newestEntry + " oldestEntry="+oldestEntry);
+    // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
+
+    for (CacheEntry<K,V> ce : map.values()) {
+      // set lastAccessedCopy to avoid more volatile reads
+      ce.lastAccessedCopy = ce.lastAccessed;
+      long thisEntry = ce.lastAccessedCopy;
+
+      // since the wantToKeep group is likely to be bigger than wantToRemove, check it first
+      if (thisEntry > newestEntry - wantToKeep) {
+        // this entry is guaranteed not to be in the bottom
+        // group, so do nothing.
+        numKept++;
+        newOldestEntry = Math.min(thisEntry, newOldestEntry);
+      } else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
+        // this entry is guaranteed to be in the bottom group
+        // so immediately remove it from the map.
+        evictEntry(ce.key);
+        numRemoved++;
+      } else {
+        // This entry *could* be in the bottom group.
+        // Collect these entries to avoid another full pass... this is wasted
+        // effort if enough entries are normally removed in this first pass.
+        // An alternate impl could make a full second pass.
+        if (eSize < eset.length-1) {
+          eset[eSize++] = ce;
+          newNewestEntry = Math.max(thisEntry, newNewestEntry);
+          newOldestEntry = Math.min(thisEntry, newOldestEntry);
+        }
+      }
+    }
 
-      @SuppressWarnings("unchecked") // generic array's are annoying
-      CacheEntry<K,V>[] eset = new CacheEntry[sz];
-      int eSize = 0;
+    // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
+    // TODO: allow this to be customized in the constructor?
+    int numPasses=1; // maximum number of linear passes over the data
 
-      // System.out.println("newestEntry="+newestEntry + " oldestEntry="+oldestEntry);
-      // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
+    // if we didn't remove enough entries, then make more passes
+    // over the values we collected, with updated min and max values.
+    while (sz - numRemoved > acceptableWaterMark && --numPasses>=0) {
 
-      for (CacheEntry<K,V> ce : map.values()) {
-        // set lastAccessedCopy to avoid more volatile reads
-        ce.lastAccessedCopy = ce.lastAccessed;
+      oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
+      newOldestEntry = Long.MAX_VALUE;
+      newestEntry = newNewestEntry;
+      newNewestEntry = -1;
+      wantToKeep = lowerWaterMark - numKept;
+      wantToRemove = sz - lowerWaterMark - numRemoved;
+
+      // iterate backward to make it easy to remove items.
+      for (int i=eSize-1; i>=0; i--) {
+        CacheEntry<K,V> ce = eset[i];
         long thisEntry = ce.lastAccessedCopy;
 
-        // since the wantToKeep group is likely to be bigger than wantToRemove, check it first
         if (thisEntry > newestEntry - wantToKeep) {
           // this entry is guaranteed not to be in the bottom
-          // group, so do nothing.
+          // group, so do nothing but remove it from the eset.
           numKept++;
+          // remove the entry by moving the last element to its position
+          eset[i] = eset[eSize-1];
+          eSize--;
+
           newOldestEntry = Math.min(thisEntry, newOldestEntry);
+
         } else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
+
           // this entry is guaranteed to be in the bottom group
           // so immediately remove it from the map.
           evictEntry(ce.key);
           numRemoved++;
+
+          // remove the entry by moving the last element to its position
+          eset[i] = eset[eSize-1];
+          eSize--;
         } else {
-          // This entry *could* be in the bottom group.
-          // Collect these entries to avoid another full pass... this is wasted
-          // effort if enough entries are normally removed in this first pass.
-          // An alternate impl could make a full second pass.
-          if (eSize < eset.length-1) {
-            eset[eSize++] = ce;
-            newNewestEntry = Math.max(thisEntry, newNewestEntry);
-            newOldestEntry = Math.min(thisEntry, newOldestEntry);
-          }
+          // This entry *could* be in the bottom group, so keep it in the eset,
+          // and update the stats.
+          newNewestEntry = Math.max(thisEntry, newNewestEntry);
+          newOldestEntry = Math.min(thisEntry, newOldestEntry);
         }
       }
-
       // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
-      // TODO: allow this to be customized in the constructor?
-      int numPasses=1; // maximum number of linear passes over the data
-
-      // if we didn't remove enough entries, then make more passes
-      // over the values we collected, with updated min and max values.
-      while (sz - numRemoved > acceptableWaterMark && --numPasses>=0) {
-
-        oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
-        newOldestEntry = Long.MAX_VALUE;
-        newestEntry = newNewestEntry;
-        newNewestEntry = -1;
-        wantToKeep = lowerWaterMark - numKept;
-        wantToRemove = sz - lowerWaterMark - numRemoved;
-
-        // iterate backward to make it easy to remove items.
-        for (int i=eSize-1; i>=0; i--) {
-          CacheEntry<K,V> ce = eset[i];
-          long thisEntry = ce.lastAccessedCopy;
-
-          if (thisEntry > newestEntry - wantToKeep) {
-            // this entry is guaranteed not to be in the bottom
-            // group, so do nothing but remove it from the eset.
-            numKept++;
-            // remove the entry by moving the last element to its position
-            eset[i] = eset[eSize-1];
-            eSize--;
-
-            newOldestEntry = Math.min(thisEntry, newOldestEntry);
-            
-          } else if (thisEntry < oldestEntry + wantToRemove) { // entry in bottom group?
-
-            // this entry is guaranteed to be in the bottom group
-            // so immediately remove it from the map.
-            evictEntry(ce.key);
-            numRemoved++;
-
-            // remove the entry by moving the last element to its position
-            eset[i] = eset[eSize-1];
-            eSize--;
-          } else {
-            // This entry *could* be in the bottom group, so keep it in the eset,
-            // and update the stats.
-            newNewestEntry = Math.max(thisEntry, newNewestEntry);
-            newOldestEntry = Math.min(thisEntry, newOldestEntry);
-          }
-        }
-        // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
-      }
+    }
 
 
+    // if we still didn't remove enough entries, then make another pass while
+    // inserting into a priority queue
+    if (sz - numRemoved > acceptableWaterMark) {
 
-      // if we still didn't remove enough entries, then make another pass while
-      // inserting into a priority queue
-      if (sz - numRemoved > acceptableWaterMark) {
-
-        oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
-        newOldestEntry = Long.MAX_VALUE;
-        newestEntry = newNewestEntry;
-        newNewestEntry = -1;
-        wantToKeep = lowerWaterMark - numKept;
-        wantToRemove = sz - lowerWaterMark - numRemoved;
-
-        PQueue<K,V> queue = new PQueue<>(wantToRemove);
-
-        for (int i=eSize-1; i>=0; i--) {
-          CacheEntry<K,V> ce = eset[i];
-          long thisEntry = ce.lastAccessedCopy;
-
-          if (thisEntry > newestEntry - wantToKeep) {
-            // this entry is guaranteed not to be in the bottom
-            // group, so do nothing but remove it from the eset.
-            numKept++;
-            // removal not necessary on last pass.
-            // eset[i] = eset[eSize-1];
-            // eSize--;
-
-            newOldestEntry = Math.min(thisEntry, newOldestEntry);
-            
-          } else if (thisEntry < oldestEntry + wantToRemove) {  // entry in bottom group?
-            // this entry is guaranteed to be in the bottom group
-            // so immediately remove it.
-            evictEntry(ce.key);
-            numRemoved++;
-
-            // removal not necessary on last pass.
-            // eset[i] = eset[eSize-1];
-            // eSize--;
-          } else {
-            // This entry *could* be in the bottom group.
-            // add it to the priority queue
-
-            // everything in the priority queue will be removed, so keep track of
-            // the lowest value that ever comes back out of the queue.
-
-            // first reduce the size of the priority queue to account for
-            // the number of items we have already removed while executing
-            // this loop so far.
-            queue.myMaxSize = sz - lowerWaterMark - numRemoved;
-            while (queue.size() > queue.myMaxSize && queue.size() > 0) {
-              CacheEntry otherEntry = queue.pop();
-              newOldestEntry = Math.min(otherEntry.lastAccessedCopy, newOldestEntry);
-            }
-            if (queue.myMaxSize <= 0) break;
-
-            Object o = queue.myInsertWithOverflow(ce);
-            if (o != null) {
-              newOldestEntry = Math.min(((CacheEntry)o).lastAccessedCopy, newOldestEntry);
-            }
-          }
-        }
+      oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
+      newOldestEntry = Long.MAX_VALUE;
+      newestEntry = newNewestEntry;
+      newNewestEntry = -1;
+      wantToKeep = lowerWaterMark - numKept;
+      wantToRemove = sz - lowerWaterMark - numRemoved;
+
+      PQueue<K,V> queue = new PQueue<>(wantToRemove);
+
+      for (int i=eSize-1; i>=0; i--) {
+        CacheEntry<K,V> ce = eset[i];
+        long thisEntry = ce.lastAccessedCopy;
+
+        if (thisEntry > newestEntry - wantToKeep) {
+          // this entry is guaranteed not to be in the bottom
+          // group, so do nothing but remove it from the eset.
+          numKept++;
+          // removal not necessary on last pass.
+          // eset[i] = eset[eSize-1];
+          // eSize--;
 
-        // Now delete everything in the priority queue.
-        // avoid using pop() since order doesn't matter anymore
-        for (CacheEntry<K,V> ce : queue.getValues()) {
-          if (ce==null) continue;
+          newOldestEntry = Math.min(thisEntry, newOldestEntry);
+
+        } else if (thisEntry < oldestEntry + wantToRemove) {  // entry in bottom group?
+          // this entry is guaranteed to be in the bottom group
+          // so immediately remove it.
           evictEntry(ce.key);
           numRemoved++;
+
+          // removal not necessary on last pass.
+          // eset[i] = eset[eSize-1];
+          // eSize--;
+        } else {
+          // This entry *could* be in the bottom group.
+          // add it to the priority queue
+
+          // everything in the priority queue will be removed, so keep track of
+          // the lowest value that ever comes back out of the queue.
+
+          // first reduce the size of the priority queue to account for
+          // the number of items we have already removed while executing
+          // this loop so far.
+          queue.myMaxSize = sz - lowerWaterMark - numRemoved;
+          while (queue.size() > queue.myMaxSize && queue.size() > 0) {
+            CacheEntry otherEntry = queue.pop();
+            newOldestEntry = Math.min(otherEntry.lastAccessedCopy, newOldestEntry);
+          }
+          if (queue.myMaxSize <= 0) break;
+
+          Object o = queue.myInsertWithOverflow(ce);
+          if (o != null) {
+            newOldestEntry = Math.min(((CacheEntry)o).lastAccessedCopy, newOldestEntry);
+          }
         }
+      }
 
-        // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " initialQueueSize="+ wantToRemove + " finalQueueSize=" + queue.size() + " sz-numRemoved=" + (sz-numRemoved));
+      // Now delete everything in the priority queue.
+      // avoid using pop() since order doesn't matter anymore
+      for (CacheEntry<K,V> ce : queue.getValues()) {
+        if (ce==null) continue;
+        evictEntry(ce.key);
+        numRemoved++;
       }
 
-      oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
-      this.oldestEntry = oldestEntry;
-    } finally {
-      isCleaning = false;  // set before markAndSweep.unlock() for visibility
-      markAndSweepLock.unlock();
+      // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + " initialQueueSize="+ wantToRemove + " finalQueueSize=" + queue.size() + " sz-numRemoved=" + (sz-numRemoved));
     }
+
+    oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry : newOldestEntry;
+    this.oldestEntry = oldestEntry;
   }
 
   private static class PQueue<K,V> extends PriorityQueue<CacheEntry<K,V>> {
@@ -477,7 +563,9 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
     return map;
   }
 
-  public static class CacheEntry<K,V> implements Comparable<CacheEntry<K,V>> {
+  public static class CacheEntry<K,V> implements Comparable<CacheEntry<K,V>>, Accountable {
+    public static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CacheEntry.class);
+
     K key;
     V value;
     volatile long lastAccessed = 0;
@@ -514,6 +602,27 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
     public String toString() {
       return "key: " + key + " value: " + value + " lastAccessed:" + lastAccessed;
     }
+
+    @Override
+    public long ramBytesUsed() {
+      long ramBytes = BASE_RAM_BYTES_USED;
+      if (key instanceof Accountable) {
+        ramBytes += ((Accountable) key).ramBytesUsed();
+      } else  {
+        ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
+      }
+      if (value instanceof Accountable) {
+        ramBytes += ((Accountable) value).ramBytesUsed();
+      } else  {
+        ramBytes += LRUCache.DEFAULT_RAM_BYTES_USED;
+      }
+      return ramBytes;
+    }
+
+    @Override
+    public Collection<Accountable> getChildResources() {
+      return Collections.emptyList();
+    }
   }
 
  private boolean isDestroyed =  false;
@@ -632,4 +741,14 @@ public class ConcurrentLRUCache<K,V> implements Cache<K,V> {
       super.finalize();
     }
   }
+
+  @Override
+  public long ramBytesUsed() {
+    return BASE_RAM_BYTES_USED + ramBytes.get();
+  }
+
+  @Override
+  public Collection<Accountable> getChildResources() {
+    return Collections.emptyList();
+  }
 }

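Taken together, the new constructor and the Accountable plumbing make a RAM-bounded
cache usable on its own. A usage sketch with arbitrary example watermarks (values
are not taken from the patch):

    import org.apache.solr.util.ConcurrentLRUCache;

    public class RamBoundedCacheSketch {
      public static void main(String[] args) {
        // Watermarks are in bytes: sweep once estimated usage exceeds 1 MB,
        // evicting oldest-accessed entries until it falls to 800 KB. With no
        // cleanup thread, the sweep runs inline on the put() that crosses the mark.
        ConcurrentLRUCache<String, String> cache =
            new ConcurrentLRUCache<>(800L * 1024, 1024L * 1024, false, null);
        cache.put("q:solr", "cached result"); // non-Accountable: flat per-entry charge
        System.out.println(cache.ramBytesUsed());
        cache.destroy();
      }
    }
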
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53dad4f0/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml b/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
index c270f58..440a2f0 100644
--- a/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/basic_configs/conf/solrconfig.xml
@@ -436,6 +436,9 @@
                the cache.  (see java.util.HashMap)
            autowarmCount - the number of entries to prepopulate from
                an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
       -->
     <filterCache class="solr.FastLRUCache"
                  size="512"

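Concretely, a memory-bounded cache declaration using the new parameter might look
like this (values are hypothetical; with maxRamMB present, size and initialSize
are ignored):

    <filterCache class="solr.FastLRUCache"
                 maxRamMB="64"
                 autowarmCount="0"/>
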
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53dad4f0/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml b/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
index 66b913e..b3fd7b1 100644
--- a/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/data_driven_schema_configs/conf/solrconfig.xml
@@ -436,6 +436,9 @@
                the cache.  (see java.util.HashMap)
            autowarmCount - the number of entries to prepopulate from
                an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
       -->
     <filterCache class="solr.FastLRUCache"
                  size="512"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53dad4f0/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
index ee46c56..17aa971 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
@@ -449,7 +449,10 @@
            initialSize - the initial capacity (number of entries) of
                the cache.  (see java.util.HashMap)
            autowarmCount - the number of entries to prepopulate from
-               and old cache.  
+               an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
       -->
     <filterCache class="solr.FastLRUCache"
                  size="512"