Posted to commits@myfaces.apache.org by lu...@apache.org on 2012/03/03 16:52:56 UTC

svn commit: r1296647 - in /myfaces/core/branches/2.0.x: impl/src/main/java/org/apache/myfaces/lifecycle/ shared/src/main/java/org/apache/myfaces/shared/application/ shared/src/main/java/org/apache/myfaces/shared/resource/ shared/src/main/java/org/apach...

Author: lu4242
Date: Sat Mar  3 15:52:55 2012
New Revision: 1296647

URL: http://svn.apache.org/viewvc?rev=1296647&view=rev
Log:
MYFACES-3484 [perf] Use Solr ConcurrentLRUCache instead of Collections.synchronizedMap

Added:
    myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/ConcurrentLRUCache.java   (with props)
    myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/PriorityQueue.java   (with props)
Modified:
    myfaces/core/branches/2.0.x/impl/src/main/java/org/apache/myfaces/lifecycle/DefaultRestoreViewSupport.java
    myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/application/DefaultViewHandlerSupport.java
    myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/resource/ResourceHandlerCache.java
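
The change swaps the synchronizedMap-wrapped, access-ordered LinkedHashMap that served as an LRU cache for the Solr-derived ConcurrentLRUCache added below. A minimal side-by-side sketch, assuming the default cache size of 500 (class and variable names here are illustrative, not part of the commit):

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.myfaces.shared.util.ConcurrentLRUCache;

    public class CacheSwapSketch
    {
        public static void main(String[] args)
        {
            final int maxSize = 500;

            // Before: every read and write synchronizes on the wrapper around an
            // access-ordered LinkedHashMap that evicts its eldest entry.
            Map<String, Boolean> before = Collections.synchronizedMap(
                    new LinkedHashMap<String, Boolean>(maxSize + 1, 1.1f, true)
                    {
                        @Override
                        protected boolean removeEldestEntry(Map.Entry<String, Boolean> eldest)
                        {
                            return size() > maxSize;
                        }
                    });
            before.put("/index.xhtml", Boolean.TRUE);

            // After: reads go straight to a ConcurrentHashMap; eviction is deferred
            // until a put pushes the size past the upper water mark.
            ConcurrentLRUCache<String, Boolean> after =
                    new ConcurrentLRUCache<String, Boolean>((maxSize * 4 + 3) / 3, maxSize);
            after.put("/index.xhtml", Boolean.TRUE);
            System.out.println("cached=" + after.get("/index.xhtml") + ", size=" + after.size());
        }
    }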

Modified: myfaces/core/branches/2.0.x/impl/src/main/java/org/apache/myfaces/lifecycle/DefaultRestoreViewSupport.java
URL: http://svn.apache.org/viewvc/myfaces/core/branches/2.0.x/impl/src/main/java/org/apache/myfaces/lifecycle/DefaultRestoreViewSupport.java?rev=1296647&r1=1296646&r2=1296647&view=diff
==============================================================================
--- myfaces/core/branches/2.0.x/impl/src/main/java/org/apache/myfaces/lifecycle/DefaultRestoreViewSupport.java (original)
+++ myfaces/core/branches/2.0.x/impl/src/main/java/org/apache/myfaces/lifecycle/DefaultRestoreViewSupport.java Sat Mar  3 15:52:55 2012
@@ -19,8 +19,6 @@
 package org.apache.myfaces.lifecycle;
 
 import java.net.MalformedURLException;
-import java.util.Collections;
-import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.logging.Level;
 import java.util.logging.Logger;
@@ -43,6 +41,7 @@ import org.apache.myfaces.buildtools.mav
 import org.apache.myfaces.shared.application.FacesServletMapping;
 import org.apache.myfaces.shared.application.InvalidViewIdException;
 import org.apache.myfaces.shared.util.Assert;
+import org.apache.myfaces.shared.util.ConcurrentLRUCache;
 import org.apache.myfaces.shared.util.ExternalContextUtils;
 
 /**
@@ -68,17 +67,20 @@ public class DefaultRestoreViewSupport i
     //private final Log log = LogFactory.getLog(DefaultRestoreViewSupport.class);
     private final Logger log = Logger.getLogger(DefaultRestoreViewSupport.class.getName());
 
-    @JSFWebConfigParam(defaultValue = "500", since = "2.0.2", group="viewhandler", tags="performance", classType="java.lang.Integer")
+    @JSFWebConfigParam(defaultValue = "500", since = "2.0.2", group="viewhandler",
+                       tags="performance", classType="java.lang.Integer")
     private static final String CHECKED_VIEWID_CACHE_SIZE_ATTRIBUTE = "org.apache.myfaces.CHECKED_VIEWID_CACHE_SIZE";
     private static final int CHECKED_VIEWID_CACHE_DEFAULT_SIZE = 500;
 
-    @JSFWebConfigParam(defaultValue = "true", since = "2.0.2", group="viewhandler", expectedValues="true,false", tags="performance")
-    private static final String CHECKED_VIEWID_CACHE_ENABLED_ATTRIBUTE = "org.apache.myfaces.CHECKED_VIEWID_CACHE_ENABLED";
+    @JSFWebConfigParam(defaultValue = "true", since = "2.0.2", group="viewhandler",
+                       expectedValues="true,false", tags="performance")
+    private static final String CHECKED_VIEWID_CACHE_ENABLED_ATTRIBUTE
+            = "org.apache.myfaces.CHECKED_VIEWID_CACHE_ENABLED";
     private static final boolean CHECKED_VIEWID_CACHE_ENABLED_DEFAULT = true;
     
     private static final String SKIP_ITERATION_HINT = "javax.faces.visit.SKIP_ITERATION";
 
-    private Map<String, Boolean> _checkedViewIdMap = null;
+    private volatile ConcurrentLRUCache<String, Boolean> _checkedViewIdMap = null;
     private Boolean _checkedViewIdCacheEnabled = null;
     
     private RenderKitFactory _renderKitFactory = null;
@@ -136,8 +138,8 @@ public class DefaultRestoreViewSupport i
             {
                 if (traceEnabled)
                 {
-                    log.finest("Calculated viewId '" + viewId + "' from request param '" + JAVAX_SERVLET_INCLUDE_PATH_INFO
-                            + "'");
+                    log.finest("Calculated viewId '" + viewId + "' from request param '"
+                               + JAVAX_SERVLET_INCLUDE_PATH_INFO + "'");
                 }
             }
             else
@@ -181,7 +183,8 @@ public class DefaultRestoreViewSupport i
     {
         ViewHandler viewHandler = facesContext.getApplication().getViewHandler();
         String renderkitId = viewHandler.calculateRenderKitId(facesContext);
-        ResponseStateManager rsm = getRenderKitFactory().getRenderKit(facesContext, renderkitId).getResponseStateManager();
+        ResponseStateManager rsm
+                = getRenderKitFactory().getRenderKit(facesContext, renderkitId).getResponseStateManager();
         return rsm.isPostback(facesContext);
     }
     
@@ -281,7 +284,8 @@ public class DefaultRestoreViewSupport i
     
     protected String[] getFaceletsViewMappings(FacesContext context)
     {
-        String faceletsViewMappings= context.getExternalContext().getInitParameter(ViewHandler.FACELETS_VIEW_MAPPINGS_PARAM_NAME);
+        String faceletsViewMappings
+                = context.getExternalContext().getInitParameter(ViewHandler.FACELETS_VIEW_MAPPINGS_PARAM_NAME);
         if(faceletsViewMappings == null)    //consider alias facelets.VIEW_MAPPINGS
         {
             faceletsViewMappings= context.getExternalContext().getInitParameter("facelets.VIEW_MAPPINGS");
@@ -299,7 +303,8 @@ public class DefaultRestoreViewSupport i
      */
     protected String handlePrefixMapping(String viewId, String prefix)
     {
-        /*  If prefix mapping (such as "/faces/*") is used for FacesServlet, normalize the viewId according to the following
+        /*  If prefix mapping (such as "/faces/*") is used for FacesServlet,
+        normalize the viewId according to the following
             algorithm, or its semantic equivalent, and return it.
                
             Remove any number of occurrences of the prefix mapping from the viewId. For example, if the incoming value
@@ -310,9 +315,15 @@ public class DefaultRestoreViewSupport i
         while (uri.startsWith(prefix) || uri.startsWith("//")) 
         {
             if(uri.startsWith(prefix))
-                    uri = uri.substring(prefix.length() - 1);    //cut off only /faces, leave the trailing '/' char for the next iteration
+            {
+                //cut off only /faces, leave the trailing '/' char for the next iteration
+                uri = uri.substring(prefix.length() - 1);
+            }
             else //uri starts with '//'
-                uri = uri.substring(1); //cut off the leading slash, leaving the second slash to compare for the next iteration
+            {
+                //cut off the leading slash, leaving the second slash to compare for the next iteration
+                uri = uri.substring(1);
+            }
         }
         //now delete any remaining leading '/'
         // TODO: CJH: I don't think this is correct, considering that getActionURL() expects everything to
@@ -374,14 +385,18 @@ public class DefaultRestoreViewSupport i
                         builder.replace(candidateViewId.lastIndexOf('.'), candidateViewId.length(), mapping);
                         String tempViewId = builder.toString();
                         if(checkResourceExists(context,tempViewId))
+                        {
                             return tempViewId;
+                        }
                     }
                 }
             }
 
             // forced facelets mappings did not match or there were no entries in faceletsViewMappings array
             if(checkResourceExists(context,candidateViewId))
+            {
                 return candidateViewId;
+            }
         
         }
         
@@ -413,12 +428,16 @@ public class DefaultRestoreViewSupport i
             
             String candidateViewId = builder.toString();
             if(checkResourceExists(context,candidateViewId))
+            {
                 return candidateViewId;
+            }
         }
 
         // Otherwise, if a physical resource exists with the name requestViewId let that value be viewId.
         if(checkResourceExists(context,requestViewId))
+        {
             return requestViewId;
+        }
         
         //Otherwise return null.
         return null;
@@ -519,11 +538,12 @@ public class DefaultRestoreViewSupport i
         }
     }
     
-    private Map<String, Boolean> getCheckedViewIDMap(FacesContext context)
+    private ConcurrentLRUCache<String, Boolean> getCheckedViewIDMap(FacesContext context)
     {
         if (_checkedViewIdMap == null)
         {
-            _checkedViewIdMap = Collections.synchronizedMap(new _CheckedViewIDMap<String, Boolean>(getViewIDCacheMaxSize(context)));
+            int maxSize = getViewIDCacheMaxSize(context);
+            _checkedViewIdMap = new ConcurrentLRUCache<String, Boolean>((maxSize * 4 + 3) / 3, maxSize);
         }
         return _checkedViewIdMap;
     }
@@ -535,7 +555,8 @@ public class DefaultRestoreViewSupport i
             //first, check to make sure that ProjectStage is production, if not, skip caching
             if (!context.isProjectStage(ProjectStage.Production))
             {
-                return _checkedViewIdCacheEnabled = Boolean.FALSE;
+                _checkedViewIdCacheEnabled = Boolean.FALSE;
+                return _checkedViewIdCacheEnabled;
             }
 
             //if in production, make sure that the cache is not explicitly disabled via context param
@@ -563,22 +584,4 @@ public class DefaultRestoreViewSupport i
                 : Integer.parseInt(configParam);
     }
 
-    private class _CheckedViewIDMap<K, V> extends LinkedHashMap<K, V>
-    {
-        private static final long serialVersionUID = 1L;
-        private int maxCapacity;
-
-        public _CheckedViewIDMap(int cacheSize)
-        {
-            // create map at max capacity and 1.1 load factor to avoid rehashing
-            super(cacheSize + 1, 1.1f, true);
-            maxCapacity = cacheSize;
-        }
-
-        @Override
-        protected boolean removeEldestEntry(Map.Entry<K, V> eldest)
-        {
-            return size() > maxCapacity;
-        }
-    }
 }
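
The (maxSize * 4 + 3) / 3 expression above maps the single configured cache size onto the new cache's upper water mark, while the configured size itself becomes the lower water mark. A worked sketch of the derived values for the default org.apache.myfaces.CHECKED_VIEWID_CACHE_SIZE of 500, following the two-argument ConcurrentLRUCache constructor added later in this commit (class name is illustrative):

    public class ViewIdCacheSizing
    {
        public static void main(String[] args)
        {
            int maxSize = 500;                          // CHECKED_VIEWID_CACHE_DEFAULT_SIZE
            int upperWaterMark = (maxSize * 4 + 3) / 3; // 667: a put above this size triggers a sweep
            int lowerWaterMark = maxSize;               // 500: the sweep trims back towards this
            // values the ConcurrentLRUCache(int, int) constructor derives from them:
            int acceptableWaterMark = (lowerWaterMark + upperWaterMark) / 2; // 583: sweep stops here
            int initialCapacity = (int) Math.ceil(0.75 * upperWaterMark);    // 501: backing map size
            System.out.println(upperWaterMark + " " + lowerWaterMark + " "
                    + acceptableWaterMark + " " + initialCapacity);
        }
    }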

Modified: myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/application/DefaultViewHandlerSupport.java
URL: http://svn.apache.org/viewvc/myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/application/DefaultViewHandlerSupport.java?rev=1296647&r1=1296646&r2=1296647&view=diff
==============================================================================
--- myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/application/DefaultViewHandlerSupport.java (original)
+++ myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/application/DefaultViewHandlerSupport.java Sat Mar  3 15:52:55 2012
@@ -19,8 +19,6 @@
 package org.apache.myfaces.shared.application;
 
 import java.net.MalformedURLException;
-import java.util.Collections;
-import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.logging.Level;
 import java.util.logging.Logger;
@@ -32,6 +30,7 @@ import javax.faces.context.FacesContext;
 
 import org.apache.myfaces.buildtools.maven2.plugin.builder.annotation.JSFWebConfigParam;
 import org.apache.myfaces.shared.renderkit.html.util.SharedStringBuilder;
+import org.apache.myfaces.shared.util.ConcurrentLRUCache;
 import org.apache.myfaces.shared.util.ExternalContextUtils;
 import org.apache.myfaces.shared.util.StringUtils;
 import org.apache.myfaces.shared.util.WebConfigParamUtils;
@@ -77,7 +76,7 @@ public class DefaultViewHandlerSupport i
     
     private static final String VIEW_HANDLER_SUPPORT_SB = "oam.viewhandler.SUPPORT_SB";
 
-    private Map<String, Boolean> _checkedViewIdMap = null;
+    private volatile ConcurrentLRUCache<String, Boolean> _checkedViewIdMap = null;
     private Boolean _checkedViewIdCacheEnabled = null;
 
     public String calculateViewId(FacesContext context, String viewId)
@@ -543,12 +542,12 @@ public class DefaultViewHandlerSupport i
         return false;
     }
 
-    private Map<String, Boolean> getCheckedViewIDMap(FacesContext context)
+    private ConcurrentLRUCache<String, Boolean> getCheckedViewIDMap(FacesContext context)
     {
         if (_checkedViewIdMap == null)
         {
-            _checkedViewIdMap = Collections.synchronizedMap(
-                    new _CheckedViewIDMap<String, Boolean>(getViewIDCacheMaxSize(context)));
+            int maxSize = getViewIDCacheMaxSize(context);
+            _checkedViewIdMap = new ConcurrentLRUCache<String, Boolean>((maxSize * 4 + 3) / 3, maxSize);
         }
         return _checkedViewIdMap;
     }
@@ -586,23 +585,4 @@ public class DefaultViewHandlerSupport i
         return WebConfigParamUtils.getIntegerInitParameter(externalContext,
                 CHECKED_VIEWID_CACHE_SIZE_ATTRIBUTE, CHECKED_VIEWID_CACHE_DEFAULT_SIZE);
     }
-
-    private class _CheckedViewIDMap<K, V> extends LinkedHashMap<K, V>
-    {
-        private static final long serialVersionUID = 1L;
-        private int maxCapacity;
-
-        public _CheckedViewIDMap(int cacheSize)
-        {
-            // create map at max capacity and 1.1 load factor to avoid rehashing
-            super(cacheSize + 1, 1.1f, true);
-            maxCapacity = cacheSize;
-        }
-
-        @Override
-        protected boolean removeEldestEntry(Map.Entry<K, V> eldest)
-        {
-            return size() > maxCapacity;
-        }
-    }
 }
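
As in DefaultRestoreViewSupport, the cache field is now volatile, so the lazily built ConcurrentLRUCache is published safely to other request threads; two threads may still race through the null check and build two caches, which is harmless here because the losing instance is simply dropped. A minimal sketch of that lazy-init pattern (holder class name is illustrative):

    import org.apache.myfaces.shared.util.ConcurrentLRUCache;

    public class LazyCacheHolder
    {
        // volatile: a thread that sees a non-null reference also sees a fully
        // constructed cache instead of a partially initialized one
        private volatile ConcurrentLRUCache<String, Boolean> cache;

        public ConcurrentLRUCache<String, Boolean> getCache(int maxSize)
        {
            if (cache == null)
            {
                // benign race: concurrent callers may each build a cache;
                // the last write wins and the other instance is garbage collected
                cache = new ConcurrentLRUCache<String, Boolean>((maxSize * 4 + 3) / 3, maxSize);
            }
            return cache;
        }
    }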

Modified: myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/resource/ResourceHandlerCache.java
URL: http://svn.apache.org/viewvc/myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/resource/ResourceHandlerCache.java?rev=1296647&r1=1296646&r2=1296647&view=diff
==============================================================================
--- myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/resource/ResourceHandlerCache.java (original)
+++ myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/resource/ResourceHandlerCache.java Sat Mar  3 15:52:55 2012
@@ -18,17 +18,16 @@
  */
 package org.apache.myfaces.shared.resource;
 
-import org.apache.myfaces.buildtools.maven2.plugin.builder.annotation.JSFWebConfigParam;
-import org.apache.myfaces.shared.util.WebConfigParamUtils;
+import java.util.logging.Level;
+import java.util.logging.Logger;
 
 import javax.faces.application.ProjectStage;
 import javax.faces.context.ExternalContext;
 import javax.faces.context.FacesContext;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.logging.Level;
-import java.util.logging.Logger;
+
+import org.apache.myfaces.buildtools.maven2.plugin.builder.annotation.JSFWebConfigParam;
+import org.apache.myfaces.shared.util.ConcurrentLRUCache;
+import org.apache.myfaces.shared.util.WebConfigParamUtils;
 
 public class ResourceHandlerCache
 {
@@ -36,34 +35,43 @@ public class ResourceHandlerCache
             .getLogger(ResourceHandlerCache.class.getName());
 
     private Boolean _resourceCacheEnabled = null;
-    private Map<ResourceKey, ResourceValue> _resourceCacheMap = null;
+    private volatile ConcurrentLRUCache<ResourceKey, ResourceValue> _resourceCacheMap = null;
 
     /**
      * Controls the size of the cache used to check if a resource exists or not. 
      * 
      * <p>See org.apache.myfaces.RESOURCE_HANDLER_CACHE_ENABLED for details.</p>
      */
-    @JSFWebConfigParam(defaultValue = "500", since = "2.0.2", group="resources", classType="java.lang.Integer", tags="performance")
-    private static final String RESOURCE_HANDLER_CACHE_SIZE_ATTRIBUTE = "org.apache.myfaces.RESOURCE_HANDLER_CACHE_SIZE";
+    @JSFWebConfigParam(defaultValue = "500", since = "2.0.2", group="resources", 
+            classType="java.lang.Integer", tags="performance")
+    private static final String RESOURCE_HANDLER_CACHE_SIZE_ATTRIBUTE = 
+        "org.apache.myfaces.RESOURCE_HANDLER_CACHE_SIZE";
     private static final int RESOURCE_HANDLER_CACHE_DEFAULT_SIZE = 500;
 
     /**
-     * Enable or disable the cache used to "remember" if a resource handled by the default ResourceHandler exists or not.
+     * Enable or disable the cache used to "remember" if a resource handled by 
+     * the default ResourceHandler exists or not.
      * 
      */
-    @JSFWebConfigParam(defaultValue = "true", since = "2.0.2", group="resources", expectedValues="true,false", tags="performance")
-    private static final String RESOURCE_HANDLER_CACHE_ENABLED_ATTRIBUTE = "org.apache.myfaces.RESOURCE_HANDLER_CACHE_ENABLED";
+    @JSFWebConfigParam(defaultValue = "true", since = "2.0.2", group="resources", 
+            expectedValues="true,false", tags="performance")
+    private static final String RESOURCE_HANDLER_CACHE_ENABLED_ATTRIBUTE = 
+        "org.apache.myfaces.RESOURCE_HANDLER_CACHE_ENABLED";
     private static final boolean RESOURCE_HANDLER_CACHE_ENABLED_DEFAULT = true;
 
     public ResourceValue getResource(String resourceName, String libraryName,
             String contentType, String localePrefix)
     {
         if (!isResourceCachingEnabled() || _resourceCacheMap == null)
+        {
             return null;
+        }
 
         if (log.isLoggable(Level.FINE))
+        {
             log.log(Level.FINE, "Attemping to get resource from cache for "
                     + resourceName);
+        }
 
         ResourceKey key = new ResourceKey(resourceName, libraryName, contentType, localePrefix);
 
@@ -73,29 +81,37 @@ public class ResourceHandlerCache
     public boolean containsResource(String resourceName, String libraryName, String contentType, String localePrefix)
     {
         if (!isResourceCachingEnabled() || _resourceCacheMap == null)
+        {
             return false;
+        }
 
         ResourceKey key = new ResourceKey(resourceName, libraryName, contentType, localePrefix);
-        return _resourceCacheMap.containsKey(key);
+        return _resourceCacheMap.get(key) != null;
     }
 
     public void putResource(String resourceName, String libraryName,
             String contentType, String localePrefix, ResourceMeta resource, ResourceLoader loader)
     {
         if (!isResourceCachingEnabled())
+        {
             return;
+        }
 
         if (log.isLoggable(Level.FINE))
+        {
             log.log(Level.FINE, "Attemping to put resource to cache for "
                     + resourceName);
+        }
 
         if (_resourceCacheMap == null)
         {
             if (log.isLoggable(Level.FINE))
+            {
                 log.log(Level.FINE, "Initializing resource cache map");
-            _resourceCacheMap = Collections
-                    .synchronizedMap(new _ResourceMap<ResourceKey, ResourceValue>(
-                            getMaxSize()));
+            }
+            int maxSize = getMaxSize();
+            _resourceCacheMap = new ConcurrentLRUCache<ResourceKey, ResourceValue>(
+                    (maxSize * 4 + 3) / 3, maxSize);
         }
 
         _resourceCacheMap.put(new ResourceKey(resourceName, libraryName,
@@ -111,12 +127,15 @@ public class ResourceHandlerCache
             //first, check to make sure that ProjectStage is production, if not, skip caching
             if (!facesContext.isProjectStage(ProjectStage.Production))
             {
-                return _resourceCacheEnabled = Boolean.FALSE;
+                _resourceCacheEnabled = Boolean.FALSE;
+                return _resourceCacheEnabled;
             }
 
             ExternalContext externalContext = facesContext.getExternalContext();
             if (externalContext == null)
+            {
                 return false; //don't cache right now, but don't disable it yet either
+            }
 
             //if in production, make sure that the cache is not explicitly disabled via context param
             _resourceCacheEnabled = WebConfigParamUtils.getBooleanInitParameter(externalContext, 
@@ -227,22 +246,4 @@ public class ResourceHandlerCache
         }
     }
 
-    private static class _ResourceMap<K, V> extends LinkedHashMap<K, V>
-    {
-        private static final long serialVersionUID = 1L;
-        private int maxCapacity;
-
-        public _ResourceMap(int cacheSize)
-        {
-            // create map at max capacity and 1.1 load factor to avoid rehashing
-            super(cacheSize + 1, 1.1f, true);
-            maxCapacity = cacheSize;
-        }
-
-        @Override
-        protected boolean removeEldestEntry(Map.Entry<K, V> eldest)
-        {
-            return size() > maxCapacity;
-        }
-    }
 }
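
ConcurrentLRUCache exposes no containsKey(), so containsResource now tests get(key) != null; this is equivalent because put() silently ignores null values, and the lookup also refreshes the entry's last-accessed counter (and is recorded as a miss when the key is absent). A short sketch of the check, using plain strings instead of ResourceKey/ResourceValue for brevity:

    import org.apache.myfaces.shared.util.ConcurrentLRUCache;

    public class ContainsCheckSketch
    {
        public static void main(String[] args)
        {
            ConcurrentLRUCache<String, Boolean> cache =
                    new ConcurrentLRUCache<String, Boolean>(667, 500);
            cache.put("/resources/style.css", Boolean.TRUE);

            // presence is inferred from get(); safe because null values are never stored
            boolean present = cache.get("/resources/style.css") != null; // true, refreshes LRU order
            boolean absent = cache.get("/resources/other.css") != null;  // false, counted as a miss
            System.out.println(present + " " + absent);
        }
    }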

Added: myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/ConcurrentLRUCache.java
URL: http://svn.apache.org/viewvc/myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/ConcurrentLRUCache.java?rev=1296647&view=auto
==============================================================================
--- myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/ConcurrentLRUCache.java (added)
+++ myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/ConcurrentLRUCache.java Sat Mar  3 15:52:55 2012
@@ -0,0 +1,829 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.myfaces.shared.util;
+
+import java.lang.ref.WeakReference;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A LRU cache implementation based upon ConcurrentHashMap and other techniques to reduce
+ * contention and synchronization overhead to utilize multiple CPU cores more effectively.
+ * <p/>
+ * Note that the implementation does not follow a true LRU (least-recently-used) eviction
+ * strategy. Instead it strives to remove least recently used items but when the initial
+ * cleanup does not remove enough items to reach the 'acceptableWaterMark' limit, it can
+ * remove more items forcefully regardless of access order.
+ *
+ *
+ * @since solr 1.4
+ * @see org.apache.solr.util.ConcurrentLRUCache
+ */
+public class ConcurrentLRUCache<K, V>
+{
+    //private static Logger log = Logger.getLogger(ConcurrentLRUCache.class
+    //        .getName());
+
+    private final ConcurrentHashMap<Object, CacheEntry<K, V>> map;
+    private final int upperWaterMark;
+    private final int lowerWaterMark;
+    private final ReentrantLock markAndSweepLock = new ReentrantLock(true);
+    private boolean isCleaning = false; // not volatile... piggybacked on other volatile vars
+    private final boolean newThreadForCleanup;
+    private volatile boolean islive = true;
+    private final Stats stats = new Stats();
+    private final int acceptableWaterMark;
+    private long oldestEntry = 0; // not volatile, only accessed in the cleaning method
+    private final EvictionListener<K, V> evictionListener;
+    private CleanupThread cleanupThread;
+
+    public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark,
+            int acceptableWatermark, int initialSize, boolean runCleanupThread,
+            boolean runNewThreadForCleanup,
+            EvictionListener<K, V> evictionListener)
+    {
+        if (upperWaterMark < 1)
+        {
+            throw new IllegalArgumentException("upperWaterMark must be > 0");
+        }
+        if (lowerWaterMark >= upperWaterMark)
+        {
+            throw new IllegalArgumentException(
+                    "lowerWaterMark must be  < upperWaterMark");
+        }
+        map = new ConcurrentHashMap<Object, CacheEntry<K, V>>(initialSize);
+        newThreadForCleanup = runNewThreadForCleanup;
+        this.upperWaterMark = upperWaterMark;
+        this.lowerWaterMark = lowerWaterMark;
+        this.acceptableWaterMark = acceptableWatermark;
+        this.evictionListener = evictionListener;
+        if (runCleanupThread)
+        {
+            cleanupThread = new CleanupThread(this);
+            cleanupThread.start();
+        }
+    }
+
+    public ConcurrentLRUCache(int size, int lowerWatermark)
+    {
+        this(size, lowerWatermark, (int) Math
+                .floor((lowerWatermark + size) / 2), (int) Math
+                .ceil(0.75 * size), false, false, null);
+    }
+
+    public void setAlive(boolean live)
+    {
+        islive = live;
+    }
+
+    public V get(K key)
+    {
+        CacheEntry<K, V> e = map.get(key);
+        if (e == null)
+        {
+            if (islive)
+            {
+                stats.missCounter.incrementAndGet();
+            }
+            return null;
+        }
+        if (islive)
+        {
+            e.lastAccessed = stats.accessCounter.incrementAndGet();
+        }
+        return e.value;
+    }
+
+    public V remove(K key)
+    {
+        CacheEntry<K, V> cacheEntry = map.remove(key);
+        if (cacheEntry != null)
+        {
+            stats.size.decrementAndGet();
+            return cacheEntry.value;
+        }
+        return null;
+    }
+
+    public V put(K key, V val)
+    {
+        if (val == null)
+        {
+            return null;
+        }
+        CacheEntry<K, V> e = new CacheEntry<K, V>(key, val,
+                stats.accessCounter.incrementAndGet());
+        CacheEntry<K, V> oldCacheEntry = map.put(key, e);
+        int currentSize;
+        if (oldCacheEntry == null)
+        {
+            currentSize = stats.size.incrementAndGet();
+        }
+        else
+        {
+            currentSize = stats.size.get();
+        }
+        if (islive)
+        {
+            stats.putCounter.incrementAndGet();
+        }
+        else
+        {
+            stats.nonLivePutCounter.incrementAndGet();
+        }
+
+        // Check if we need to clear out old entries from the cache.
+        // isCleaning variable is checked instead of markAndSweepLock.isLocked()
+        // for performance because every put invokation will check until
+        // the size is back to an acceptable level.
+        //
+        // There is a race between the check and the call to markAndSweep, but
+        // it's unimportant because markAndSweep actually aquires the lock or returns if it can't.
+        //
+        // Thread safety note: isCleaning read is piggybacked (comes after) other volatile reads
+        // in this method.
+        if (currentSize > upperWaterMark && !isCleaning)
+        {
+            if (newThreadForCleanup)
+            {
+                new Thread()
+                {
+                    @Override
+                    public void run()
+                    {
+                        markAndSweep();
+                    }
+                }.start();
+            }
+            else if (cleanupThread != null)
+            {
+                cleanupThread.wakeThread();
+            }
+            else
+            {
+                markAndSweep();
+            }
+        }
+        return oldCacheEntry == null ? null : oldCacheEntry.value;
+    }
+
+    /**
+     * Removes items from the cache to bring the size down
+     * to an acceptable value ('acceptableWaterMark').
+     * <p/>
+     * It is done in two stages. In the first stage, least recently used items are evicted.
+     * If, after the first stage, the cache size is still greater than 'acceptableSize'
+     * config parameter, the second stage takes over.
+     * <p/>
+     * The second stage is more intensive and tries to bring down the cache size
+     * to the 'lowerWaterMark' config parameter.
+     */
+    private void markAndSweep()
+    {
+        // if we want to keep at least 1000 entries, then timestamps of
+        // current through current-1000 are guaranteed not to be the oldest (but that does
+        // not mean there are 1000 entries in that group... it's acutally anywhere between
+        // 1 and 1000).
+        // Also, if we want to remove 500 entries, then
+        // oldestEntry through oldestEntry+500 are guaranteed to be
+        // removed (however many there are there).
+
+        if (!markAndSweepLock.tryLock())
+        {
+            return;
+        }
+        try
+        {
+            long oldestEntry = this.oldestEntry;
+            isCleaning = true;
+            this.oldestEntry = oldestEntry; // volatile write to make isCleaning visible
+
+            long timeCurrent = stats.accessCounter.get();
+            int sz = stats.size.get();
+
+            int numRemoved = 0;
+            int numKept = 0;
+            long newestEntry = timeCurrent;
+            long newNewestEntry = -1;
+            long newOldestEntry = Long.MAX_VALUE;
+
+            int wantToKeep = lowerWaterMark;
+            int wantToRemove = sz - lowerWaterMark;
+
+            @SuppressWarnings("unchecked")
+            // generic array's are anoying
+            CacheEntry<K, V>[] eset = new CacheEntry[sz];
+            int eSize = 0;
+
+            // System.out.println("newestEntry="+newestEntry + " oldestEntry="+oldestEntry);
+            // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + 
+            //    " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
+
+            for (CacheEntry<K, V> ce : map.values())
+            {
+                // set lastAccessedCopy to avoid more volatile reads
+                ce.lastAccessedCopy = ce.lastAccessed;
+                long thisEntry = ce.lastAccessedCopy;
+
+                // since the wantToKeep group is likely to be bigger than wantToRemove, check it first
+                if (thisEntry > newestEntry - wantToKeep)
+                {
+                    // this entry is guaranteed not to be in the bottom
+                    // group, so do nothing.
+                    numKept++;
+                    newOldestEntry = Math.min(thisEntry, newOldestEntry);
+                }
+                else if (thisEntry < oldestEntry + wantToRemove)
+                { // entry in bottom group?
+                    // this entry is guaranteed to be in the bottom group
+                    // so immediately remove it from the map.
+                    evictEntry(ce.key);
+                    numRemoved++;
+                }
+                else
+                {
+                    // This entry *could* be in the bottom group.
+                    // Collect these entries to avoid another full pass... this is wasted
+                    // effort if enough entries are normally removed in this first pass.
+                    // An alternate impl could make a full second pass.
+                    if (eSize < eset.length - 1)
+                    {
+                        eset[eSize++] = ce;
+                        newNewestEntry = Math.max(thisEntry, newNewestEntry);
+                        newOldestEntry = Math.min(thisEntry, newOldestEntry);
+                    }
+                }
+            }
+
+            // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + 
+            //    " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
+            // TODO: allow this to be customized in the constructor?
+            int numPasses = 1; // maximum number of linear passes over the data
+
+            // if we didn't remove enough entries, then make more passes
+            // over the values we collected, with updated min and max values.
+            while (sz - numRemoved > acceptableWaterMark && --numPasses >= 0)
+            {
+
+                oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry
+                        : newOldestEntry;
+                newOldestEntry = Long.MAX_VALUE;
+                newestEntry = newNewestEntry;
+                newNewestEntry = -1;
+                wantToKeep = lowerWaterMark - numKept;
+                wantToRemove = sz - lowerWaterMark - numRemoved;
+
+                // iterate backward to make it easy to remove items.
+                for (int i = eSize - 1; i >= 0; i--)
+                {
+                    CacheEntry<K, V> ce = eset[i];
+                    long thisEntry = ce.lastAccessedCopy;
+
+                    if (thisEntry > newestEntry - wantToKeep)
+                    {
+                        // this entry is guaranteed not to be in the bottom
+                        // group, so do nothing but remove it from the eset.
+                        numKept++;
+                        // remove the entry by moving the last element to it's position
+                        eset[i] = eset[eSize - 1];
+                        eSize--;
+
+                        newOldestEntry = Math.min(thisEntry, newOldestEntry);
+
+                    }
+                    else if (thisEntry < oldestEntry + wantToRemove)
+                    { // entry in bottom group?
+
+                        // this entry is guaranteed to be in the bottom group
+                        // so immediately remove it from the map.
+                        evictEntry(ce.key);
+                        numRemoved++;
+
+                        // remove the entry by moving the last element to it's position
+                        eset[i] = eset[eSize - 1];
+                        eSize--;
+                    }
+                    else
+                    {
+                        // This entry *could* be in the bottom group, so keep it in the eset,
+                        // and update the stats.
+                        newNewestEntry = Math.max(thisEntry, newNewestEntry);
+                        newOldestEntry = Math.min(thisEntry, newOldestEntry);
+                    }
+                }
+                // System.out.println("items removed:" + numRemoved + " numKept=" + 
+                //    numKept + " esetSz="+ eSize + " sz-numRemoved=" + (sz-numRemoved));
+            }
+
+            // if we still didn't remove enough entries, then make another pass while
+            // inserting into a priority queue
+            if (sz - numRemoved > acceptableWaterMark)
+            {
+
+                oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry
+                        : newOldestEntry;
+                newOldestEntry = Long.MAX_VALUE;
+                newestEntry = newNewestEntry;
+                newNewestEntry = -1;
+                wantToKeep = lowerWaterMark - numKept;
+                wantToRemove = sz - lowerWaterMark - numRemoved;
+
+                PQueue<K, V> queue = new PQueue<K, V>(wantToRemove);
+
+                for (int i = eSize - 1; i >= 0; i--)
+                {
+                    CacheEntry<K, V> ce = eset[i];
+                    long thisEntry = ce.lastAccessedCopy;
+
+                    if (thisEntry > newestEntry - wantToKeep)
+                    {
+                        // this entry is guaranteed not to be in the bottom
+                        // group, so do nothing but remove it from the eset.
+                        numKept++;
+                        // removal not necessary on last pass.
+                        // eset[i] = eset[eSize-1];
+                        // eSize--;
+
+                        newOldestEntry = Math.min(thisEntry, newOldestEntry);
+
+                    }
+                    else if (thisEntry < oldestEntry + wantToRemove)
+                    { // entry in bottom group?
+                        // this entry is guaranteed to be in the bottom group
+                        // so immediately remove it.
+                        evictEntry(ce.key);
+                        numRemoved++;
+
+                        // removal not necessary on last pass.
+                        // eset[i] = eset[eSize-1];
+                        // eSize--;
+                    }
+                    else
+                    {
+                        // This entry *could* be in the bottom group.
+                        // add it to the priority queue
+
+                        // everything in the priority queue will be removed, so keep track of
+                        // the lowest value that ever comes back out of the queue.
+
+                        // first reduce the size of the priority queue to account for
+                        // the number of items we have already removed while executing
+                        // this loop so far.
+                        queue.myMaxSize = sz - lowerWaterMark - numRemoved;
+                        while (queue.size() > queue.myMaxSize
+                                && queue.size() > 0)
+                        {
+                            CacheEntry otherEntry = (CacheEntry) queue.pop();
+                            newOldestEntry = Math
+                                    .min(otherEntry.lastAccessedCopy,
+                                            newOldestEntry);
+                        }
+                        if (queue.myMaxSize <= 0)
+                        {
+                            break;
+                        }
+
+                        Object o = queue.myInsertWithOverflow(ce);
+                        if (o != null)
+                        {
+                            newOldestEntry = Math.min(
+                                    ((CacheEntry) o).lastAccessedCopy,
+                                    newOldestEntry);
+                        }
+                    }
+                }
+
+                // Now delete everything in the priority queue.
+                // avoid using pop() since order doesn't matter anymore
+                for (CacheEntry<K, V> ce : queue.getValues())
+                {
+                    if (ce == null)
+                    {
+                        continue;
+                    }
+                    evictEntry(ce.key);
+                    numRemoved++;
+                }
+
+                // System.out.println("items removed:" + numRemoved + " numKept=" + numKept + 
+                //    " initialQueueSize="+ wantToRemove + " finalQueueSize=" + 
+                //      queue.size() + " sz-numRemoved=" + (sz-numRemoved));
+            }
+
+            oldestEntry = newOldestEntry == Long.MAX_VALUE ? oldestEntry
+                    : newOldestEntry;
+            this.oldestEntry = oldestEntry;
+        }
+        finally
+        {
+            isCleaning = false; // set before markAndSweep.unlock() for visibility
+            markAndSweepLock.unlock();
+        }
+    }
+
+    private static class PQueue<K, V> extends PriorityQueue<CacheEntry<K, V>>
+    {
+        int myMaxSize;
+        final Object[] heap;
+
+        PQueue(int maxSz)
+        {
+            super(maxSz);
+            heap = getHeapArray();
+            myMaxSize = maxSz;
+        }
+
+        @SuppressWarnings("unchecked")
+        Iterable<CacheEntry<K, V>> getValues()
+        {
+            return (Iterable) Collections.unmodifiableCollection(Arrays
+                    .asList(heap));
+        }
+
+        @Override
+        protected boolean lessThan(CacheEntry a, CacheEntry b)
+        {
+            // reverse the parameter order so that the queue keeps the oldest items
+            return b.lastAccessedCopy < a.lastAccessedCopy;
+        }
+
+        // necessary because maxSize is private in base class
+        @SuppressWarnings("unchecked")
+        public CacheEntry<K, V> myInsertWithOverflow(CacheEntry<K, V> element)
+        {
+            if (size() < myMaxSize)
+            {
+                add(element);
+                return null;
+            }
+            else if (size() > 0
+                    && !lessThan(element, (CacheEntry<K, V>) heap[1]))
+            {
+                CacheEntry<K, V> ret = (CacheEntry<K, V>) heap[1];
+                heap[1] = element;
+                updateTop();
+                return ret;
+            }
+            else
+            {
+                return element;
+            }
+        }
+    }
+
+    private void evictEntry(K key)
+    {
+        CacheEntry<K, V> o = map.remove(key);
+        if (o == null)
+        {
+            return;
+        }
+        stats.size.decrementAndGet();
+        stats.evictionCounter.incrementAndGet();
+        if (evictionListener != null)
+        {
+            evictionListener.evictedEntry(o.key, o.value);
+        }
+    }
+
+    /**
+     * Returns 'n' number of oldest accessed entries present in this cache.
+     *
+     * This uses a TreeSet to collect the 'n' oldest items ordered by ascending last access time
+     *  and returns a LinkedHashMap containing 'n' or less than 'n' entries.
+     * @param n the number of oldest items needed
+     * @return a LinkedHashMap containing 'n' or less than 'n' entries
+     */
+    public Map<K, V> getOldestAccessedItems(int n)
+    {
+        Map<K, V> result = new LinkedHashMap<K, V>();
+        if (n <= 0)
+        {
+            return result;
+        }
+        TreeSet<CacheEntry<K, V>> tree = new TreeSet<CacheEntry<K, V>>();
+        markAndSweepLock.lock();
+        try
+        {
+            for (Map.Entry<Object, CacheEntry<K, V>> entry : map.entrySet())
+            {
+                CacheEntry<K, V> ce = entry.getValue();
+                ce.lastAccessedCopy = ce.lastAccessed;
+                if (tree.size() < n)
+                {
+                    tree.add(ce);
+                }
+                else
+                {
+                    if (ce.lastAccessedCopy < tree.first().lastAccessedCopy)
+                    {
+                        tree.remove(tree.first());
+                        tree.add(ce);
+                    }
+                }
+            }
+        }
+        finally
+        {
+            markAndSweepLock.unlock();
+        }
+        for (CacheEntry<K, V> e : tree)
+        {
+            result.put(e.key, e.value);
+        }
+        return result;
+    }
+
+    public Map<K, V> getLatestAccessedItems(int n)
+    {
+        Map<K, V> result = new LinkedHashMap<K, V>();
+        if (n <= 0)
+        {
+            return result;
+        }
+        TreeSet<CacheEntry<K, V>> tree = new TreeSet<CacheEntry<K, V>>();
+        // we need to grab the lock since we are changing lastAccessedCopy
+        markAndSweepLock.lock();
+        try
+        {
+            for (Map.Entry<Object, CacheEntry<K, V>> entry : map.entrySet())
+            {
+                CacheEntry<K, V> ce = entry.getValue();
+                ce.lastAccessedCopy = ce.lastAccessed;
+                if (tree.size() < n)
+                {
+                    tree.add(ce);
+                }
+                else
+                {
+                    if (ce.lastAccessedCopy > tree.last().lastAccessedCopy)
+                    {
+                        tree.remove(tree.last());
+                        tree.add(ce);
+                    }
+                }
+            }
+        }
+        finally
+        {
+            markAndSweepLock.unlock();
+        }
+        for (CacheEntry<K, V> e : tree)
+        {
+            result.put(e.key, e.value);
+        }
+        return result;
+    }
+
+    public int size()
+    {
+        return stats.size.get();
+    }
+
+    public void clear()
+    {
+        map.clear();
+    }
+
+    public Map<Object, CacheEntry<K, V>> getMap()
+    {
+        return map;
+    }
+
+    private static class CacheEntry<K, V> implements
+            Comparable<CacheEntry<K, V>>
+    {
+        K key;
+        V value;
+        volatile long lastAccessed = 0;
+        long lastAccessedCopy = 0;
+
+        public CacheEntry(K key, V value, long lastAccessed)
+        {
+            this.key = key;
+            this.value = value;
+            this.lastAccessed = lastAccessed;
+        }
+
+        public void setLastAccessed(long lastAccessed)
+        {
+            this.lastAccessed = lastAccessed;
+        }
+
+        public int compareTo(CacheEntry<K, V> that)
+        {
+            if (this.lastAccessedCopy == that.lastAccessedCopy)
+            {
+                return 0;
+            }
+            return this.lastAccessedCopy < that.lastAccessedCopy ? 1 : -1;
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return value.hashCode();
+        }
+
+        @Override
+        public boolean equals(Object obj)
+        {
+            return value.equals(obj);
+        }
+
+        @Override
+        public String toString()
+        {
+            return "key: " + key + " value: " + value + " lastAccessed:"
+                    + lastAccessed;
+        }
+    }
+
+    private boolean isDestroyed = false;
+
+    public void destroy()
+    {
+        try
+        {
+            if (cleanupThread != null)
+            {
+                cleanupThread.stopThread();
+            }
+        }
+        finally
+        {
+            isDestroyed = true;
+        }
+    }
+
+    public Stats getStats()
+    {
+        return stats;
+    }
+
+    public static class Stats
+    {
+        private final AtomicLong accessCounter = new AtomicLong(0);
+        private final AtomicLong putCounter = new AtomicLong(0);
+        private final AtomicLong nonLivePutCounter = new AtomicLong(0);
+        private final AtomicLong missCounter = new AtomicLong();
+        private final AtomicInteger size = new AtomicInteger();
+        private AtomicLong evictionCounter = new AtomicLong();
+
+        public long getCumulativeLookups()
+        {
+            return (accessCounter.get() - putCounter.get() - nonLivePutCounter
+                    .get()) + missCounter.get();
+        }
+
+        public long getCumulativeHits()
+        {
+            return accessCounter.get() - putCounter.get()
+                    - nonLivePutCounter.get();
+        }
+
+        public long getCumulativePuts()
+        {
+            return putCounter.get();
+        }
+
+        public long getCumulativeEvictions()
+        {
+            return evictionCounter.get();
+        }
+
+        public int getCurrentSize()
+        {
+            return size.get();
+        }
+
+        public long getCumulativeNonLivePuts()
+        {
+            return nonLivePutCounter.get();
+        }
+
+        public long getCumulativeMisses()
+        {
+            return missCounter.get();
+        }
+
+        public void add(Stats other)
+        {
+            accessCounter.addAndGet(other.accessCounter.get());
+            putCounter.addAndGet(other.putCounter.get());
+            nonLivePutCounter.addAndGet(other.nonLivePutCounter.get());
+            missCounter.addAndGet(other.missCounter.get());
+            evictionCounter.addAndGet(other.evictionCounter.get());
+            size.set(Math.max(size.get(), other.size.get()));
+        }
+    }
+
+    public static interface EvictionListener<K, V>
+    {
+        public void evictedEntry(K key, V value);
+    }
+
+    private static class CleanupThread extends Thread
+    {
+        private WeakReference<ConcurrentLRUCache> cache;
+
+        private boolean stop = false;
+
+        public CleanupThread(ConcurrentLRUCache c)
+        {
+            cache = new WeakReference<ConcurrentLRUCache>(c);
+        }
+
+        @Override
+        public void run()
+        {
+            while (true)
+            {
+                synchronized (this)
+                {
+                    if (stop)
+                    {
+                        break;
+                    }
+                    try
+                    {
+                        this.wait();
+                    }
+                    catch (InterruptedException e)
+                    {
+                    }
+                }
+                if (stop)
+                {
+                    break;
+                }
+                ConcurrentLRUCache c = cache.get();
+                if (c == null)
+                {
+                    break;
+                }
+                c.markAndSweep();
+            }
+        }
+
+        void wakeThread()
+        {
+            synchronized (this)
+            {
+                this.notify();
+            }
+        }
+
+        void stopThread()
+        {
+            synchronized (this)
+            {
+                stop = true;
+                this.notify();
+            }
+        }
+    }
+
+    @Override
+    protected void finalize() throws Throwable
+    {
+        try
+        {
+            if (!isDestroyed)
+            {
+                // This log message is useless, because it is not supposed to use
+                // thread cleanup strategy for this class.
+                //log.severe("ConcurrentLRUCache was not destroyed prior to finalize()," +
+                //        " indicates a bug -- POSSIBLE RESOURCE LEAK!!!");
+                destroy();
+            }
+        }
+        finally
+        {
+            super.finalize();
+        }
+    }
+}

Propchange: myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/ConcurrentLRUCache.java
------------------------------------------------------------------------------
    svn:eol-style = native
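
A minimal usage sketch of the class added above, exercising the full constructor, the eviction callback and the statistics; the water marks and keys are arbitrary example values:

    import java.util.Map;

    import org.apache.myfaces.shared.util.ConcurrentLRUCache;

    public class ConcurrentLRUCacheDemo
    {
        public static void main(String[] args)
        {
            ConcurrentLRUCache.EvictionListener<String, Integer> listener =
                    new ConcurrentLRUCache.EvictionListener<String, Integer>()
                    {
                        public void evictedEntry(String key, Integer value)
                        {
                            System.out.println("evicted " + key + "=" + value);
                        }
                    };

            // upperWaterMark=8 (a put beyond this size triggers a sweep),
            // lowerWaterMark=4, acceptableWaterMark=6, initialSize=8,
            // no cleanup thread: the sweep runs inline on the triggering put
            ConcurrentLRUCache<String, Integer> cache =
                    new ConcurrentLRUCache<String, Integer>(8, 4, 6, 8, false, false, listener);

            for (int i = 0; i < 20; i++)
            {
                cache.put("view-" + i, i);
                cache.get("view-0"); // keep one entry recently accessed so the sweeps retain it
            }

            Map<String, Integer> newest = cache.getLatestAccessedItems(3);
            System.out.println("size=" + cache.size()
                    + " evictions=" + cache.getStats().getCumulativeEvictions()
                    + " newest=" + newest.keySet());
        }
    }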

Added: myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/PriorityQueue.java
URL: http://svn.apache.org/viewvc/myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/PriorityQueue.java?rev=1296647&view=auto
==============================================================================
--- myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/PriorityQueue.java (added)
+++ myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/PriorityQueue.java Sat Mar  3 15:52:55 2012
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.myfaces.shared.util;
+
+/** A PriorityQueue maintains a partial ordering of its elements such that the
+ * least element can always be found in constant time.  Put()'s and pop()'s
+ * require log(size) time.
+ *
+ * <p><b>NOTE</b>: This class will pre-allocate a full array of
+ * length <code>maxSize+1</code> if instantiated via the
+ * {@link #PriorityQueue(int,boolean)} constructor with
+ * <code>prepopulate</code> set to <code>true</code>.
+ * 
+ * @lucene.internal
+ * @see org.apache.lucene.util.PriorityQueue
+*/
+public abstract class PriorityQueue<T>
+{
+    private int size;
+    private final int maxSize;
+    private final T[] heap;
+
+    public PriorityQueue(int maxSize)
+    {
+        this(maxSize, true);
+    }
+
+    @SuppressWarnings("unchecked")
+    public PriorityQueue(int maxSize, boolean prepopulate)
+    {
+        size = 0;
+        int heapSize;
+        if (0 == maxSize)
+        {
+            // We allocate 1 extra to avoid if statement in top()
+            heapSize = 2;
+        }
+        else
+        {
+            if (maxSize == Integer.MAX_VALUE)
+            {
+                // Don't wrap heapSize to -1, in this case, which
+                // causes a confusing NegativeArraySizeException.
+                // Note that very likely this will simply then hit
+                // an OOME, but at least that's more indicative to
+                // caller that this values is too big.  We don't +1
+                // in this case, but it's very unlikely in practice
+                // one will actually insert this many objects into
+                // the PQ:
+                heapSize = Integer.MAX_VALUE;
+            }
+            else
+            {
+                // NOTE: we add +1 because all access to heap is
+                // 1-based not 0-based.  heap[0] is unused.
+                heapSize = maxSize + 1;
+            }
+        }
+        heap = (T[]) new Object[heapSize]; // T is unbounded type, so this unchecked cast works always
+        this.maxSize = maxSize;
+
+        if (prepopulate)
+        {
+            // If sentinel objects are supported, populate the queue with them
+            T sentinel = getSentinelObject();
+            if (sentinel != null)
+            {
+                heap[1] = sentinel;
+                for (int i = 2; i < heap.length; i++)
+                {
+                    heap[i] = getSentinelObject();
+                }
+                size = maxSize;
+            }
+        }
+    }
+
+    /** Determines the ordering of objects in this priority queue.  Subclasses
+     *  must define this one method.
+     *  @return <code>true</code> iff parameter <tt>a</tt> is less than parameter <tt>b</tt>.
+     */
+    protected abstract boolean lessThan(T a, T b);
+
+    /**
+     * This method can be overridden by extending classes to return a sentinel
+     * object which will be used by the {@link PriorityQueue#PriorityQueue(int,boolean)} 
+     * constructor to fill the queue, so that the code which uses that queue can always
+     * assume it's full and only change the top without attempting to insert any new
+     * object.<br>
+     * 
+     * Those sentinel values should always compare worse than any non-sentinel
+     * value (i.e., {@link #lessThan} should always favor the
+     * non-sentinel values).<br>
+     * 
+     * By default, this method returns false, which means the queue will not be
+     * filled with sentinel values. Otherwise, the value returned will be used to
+     * pre-populate the queue. Adds sentinel values to the queue.<br>
+     * 
+     * If this method is extended to return a non-null value, then the following
+     * usage pattern is recommended:
+     * 
+     * <pre>
+     * // extends getSentinelObject() to return a non-null value.
+     * PriorityQueue<MyObject> pq = new MyQueue<MyObject>(numHits);
+     * // save the 'top' element, which is guaranteed to not be null.
+     * MyObject pqTop = pq.top();
+     * &lt;...&gt;
+     * // now in order to add a new element, which is 'better' than top (after 
+     * // you've verified it is better), it is as simple as:
+     * pqTop.change().
+     * pqTop = pq.updateTop();
+     * </pre>
+     * 
+     * <b>NOTE:</b> if this method returns a non-null value, it will be called by
+     * the {@link PriorityQueue#PriorityQueue(int,boolean)} constructor 
+     * {@link #size()} times, relying on a new object to be returned and will not
+     * check if it's null again. Therefore you should ensure any call to this
+     * method creates a new instance and behaves consistently, e.g., it cannot
+     * return null if it previously returned non-null.
+     * 
+     * @return the sentinel object to use to pre-populate the queue, or null if
+     *         sentinel objects are not supported.
+     */
+    protected T getSentinelObject()
+    {
+        return null;
+    }
+
+    /**
+     * Adds an Object to a PriorityQueue in log(size) time. If one tries to add
+     * more objects than maxSize from initialize an
+     * {@link ArrayIndexOutOfBoundsException} is thrown.
+     * 
+     * @return the new 'top' element in the queue.
+     */
+    public final T add(T element)
+    {
+        size++;
+        heap[size] = element;
+        upHeap();
+        return heap[1];
+    }
+
+    /**
+     * Adds an Object to a PriorityQueue in log(size) time.
+     * It returns the object (if any) that was
+     * dropped off the heap because it was full. This can be
+     * the given parameter (in case it is smaller than the
+     * full heap's minimum, and couldn't be added), or another
+     * object that was previously the smallest value in the
+     * heap and now has been replaced by a larger one, or null
+     * if the queue wasn't yet full with maxSize elements.
+     */
+    public T insertWithOverflow(T element)
+    {
+        if (size < maxSize)
+        {
+            add(element);
+            return null;
+        }
+        else if (size > 0 && !lessThan(element, heap[1]))
+        {
+            T ret = heap[1];
+            heap[1] = element;
+            updateTop();
+            return ret;
+        }
+        else
+        {
+            return element;
+        }
+    }
+
+    /** Returns the least element of the PriorityQueue in constant time. */
+    public final T top()
+    {
+        // We don't need to check size here: if maxSize is 0,
+        // then heap is length 2 array with both entries null.
+        // If size is 0 then heap[1] is already null.
+        return heap[1];
+    }
+
+    /** Removes and returns the least element of the PriorityQueue in log(size)
+      time. */
+    public final T pop()
+    {
+        if (size > 0)
+        {
+            T result = heap[1]; // save first value
+            heap[1] = heap[size]; // move last to first
+            heap[size] = null; // permit GC of objects
+            size--;
+            downHeap(); // adjust heap
+            return result;
+        }
+        else
+        {
+            return null;
+        }
+    }
+
+    /**
+     * Should be called when the Object at top changes values. Still log(n) worst
+     * case, but it's at least twice as fast to
+     * 
+     * <pre>
+     * pq.top().change();
+     * pq.updateTop();
+     * </pre>
+     * 
+     * instead of
+     * 
+     * <pre>
+     * o = pq.pop();
+     * o.change();
+     * pq.push(o);
+     * </pre>
+     * 
+     * @return the new 'top' element.
+     */
+    public final T updateTop()
+    {
+        downHeap();
+        return heap[1];
+    }
+
+    /** Returns the number of elements currently stored in the PriorityQueue. */
+    public final int size()
+    {
+        return size;
+    }
+
+    /** Removes all entries from the PriorityQueue. */
+    public final void clear()
+    {
+        for (int i = 0; i <= size; i++)
+        {
+            heap[i] = null;
+        }
+        size = 0;
+    }
+
+    private final void upHeap()
+    {
+        int i = size;
+        T node = heap[i]; // save bottom node
+        int j = i >>> 1;
+        while (j > 0 && lessThan(node, heap[j]))
+        {
+            heap[i] = heap[j]; // shift parents down
+            i = j;
+            j = j >>> 1;
+        }
+        heap[i] = node; // install saved node
+    }
+
+    private final void downHeap()
+    {
+        int i = 1;
+        T node = heap[i]; // save top node
+        int j = i << 1; // find smaller child
+        int k = j + 1;
+        if (k <= size && lessThan(heap[k], heap[j]))
+        {
+            j = k;
+        }
+        while (j <= size && lessThan(heap[j], node))
+        {
+            heap[i] = heap[j]; // shift up child
+            i = j;
+            j = i << 1;
+            k = j + 1;
+            if (k <= size && lessThan(heap[k], heap[j]))
+            {
+                j = k;
+            }
+        }
+        heap[i] = node; // install saved node
+    }
+
+    /** This method returns the internal heap array as Object[].
+     * @lucene.internal
+     */
+    protected final Object[] getHeapArray()
+    {
+        return (Object[]) heap;
+    }
+}

Propchange: myfaces/core/branches/2.0.x/shared/src/main/java/org/apache/myfaces/shared/util/PriorityQueue.java
------------------------------------------------------------------------------
    svn:eol-style = native
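
A minimal sketch of how the abstract class above is meant to be subclassed, mirroring the PQueue nested in ConcurrentLRUCache: only lessThan() must be implemented, and insertWithOverflow() keeps the queue bounded by dropping the least element (the LongQueue name and sample values are illustrative):

    import org.apache.myfaces.shared.util.PriorityQueue;

    public class LongQueueDemo
    {
        // a bounded queue whose top() is always the smallest retained value
        static class LongQueue extends PriorityQueue<Long>
        {
            LongQueue(int maxSize)
            {
                super(maxSize, false); // no sentinel pre-population
            }

            @Override
            protected boolean lessThan(Long a, Long b)
            {
                return a < b;
            }
        }

        public static void main(String[] args)
        {
            LongQueue queue = new LongQueue(3);
            for (long value : new long[] { 42, 7, 99, 3, 55 })
            {
                // once full, returns whichever element fell off the heap
                queue.insertWithOverflow(value);
            }
            // with this lessThan() the queue retains the 3 largest values
            while (queue.size() > 0)
            {
                System.out.println(queue.pop()); // prints 42, 55, 99 in ascending order
            }
        }
    }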