Posted to jcs-dev@jakarta.apache.org by as...@apache.org on 2006/02/17 23:33:33 UTC

svn commit: r378644 - in /jakarta/jcs/trunk: src/java/org/apache/jcs/engine/ src/java/org/apache/jcs/engine/behavior/ src/java/org/apache/jcs/engine/memory/mru/ xdocs/

Author: asmuts
Date: Fri Feb 17 14:33:31 2006
New Revision: 378644

URL: http://svn.apache.org/viewcvs?rev=378644&view=rev
Log:
added additional tests for the test mru impl and removed unused classes

Removed:
    jakarta/jcs/trunk/src/java/org/apache/jcs/engine/Attributes.java
    jakarta/jcs/trunk/src/java/org/apache/jcs/engine/behavior/IAttributes.java
Modified:
    jakarta/jcs/trunk/src/java/org/apache/jcs/engine/CacheEventQueue.java
    jakarta/jcs/trunk/src/java/org/apache/jcs/engine/memory/mru/MRUMemoryCache.java
    jakarta/jcs/trunk/xdocs/IndexedDiskAuxCache.xml
    jakarta/jcs/trunk/xdocs/JCSvsEHCache.xml

Modified: jakarta/jcs/trunk/src/java/org/apache/jcs/engine/CacheEventQueue.java
URL: http://svn.apache.org/viewcvs/jakarta/jcs/trunk/src/java/org/apache/jcs/engine/CacheEventQueue.java?rev=378644&r1=378643&r2=378644&view=diff
==============================================================================
--- jakarta/jcs/trunk/src/java/org/apache/jcs/engine/CacheEventQueue.java (original)
+++ jakarta/jcs/trunk/src/java/org/apache/jcs/engine/CacheEventQueue.java Fri Feb 17 14:33:31 2006
@@ -39,8 +39,6 @@
  * for a specified period, now set to 1 minute. If something comes in after that
  * a new processor thread should be created.
  * 
- * I didn't get all of Hanson's changes in yet, but I did add the
- * syncronization.
  */
 public class CacheEventQueue
     implements ICacheEventQueue
@@ -50,12 +48,14 @@
     private static final int queueType = SINGLE_QUEUE_TYPE;
 
     private static final int DEFAULT_WAIT_TO_DIE_MILLIS = 10000;
-    
+
     // time to wait for an event before snuffing the background thread
     // if the queue is empty.
     // make configurable later
     private int waitToDieMillis = DEFAULT_WAIT_TO_DIE_MILLIS;
 
+    // When the events are pulled off the queue, they tell the listener to handle
+    // the specific event type. The work is done by the listener.
     private ICacheListener listener;
 
     private long listenerId;
@@ -67,16 +67,23 @@
     // in milliseconds
     private int waitBeforeRetry;
 
+    // this is true if there is no worker thread.
     private boolean destroyed = true;
 
+    // This means that the queue is functional.
+    // If we reach the max number of failures, the queue is marked as
+    // non-functional and will never work again.
     private boolean working = true;
 
+    // the thread that works the queue.
     private Thread processorThread;
 
     private Object queueLock = new Object();
 
+    // the head of the queue
     private Node head = new Node();
 
+    // the end of the queue
     private Node tail = head;
 
     /**
@@ -136,10 +143,8 @@
      */
     public synchronized void stopProcessing()
     {
-
         destroyed = true;
         processorThread = null;
-
     }
 
     /**
@@ -172,6 +177,8 @@
     }
 
     /**
+     * If the queue has an active thread, it is considered alive.
+     * 
      * @return The alive value
      */
     public boolean isAlive()
@@ -191,7 +198,7 @@
     }
 
     /**
-     * @return The {3} value
+     * @return The listenerId value
      */
     public long getListenerId()
     {
@@ -200,6 +207,8 @@
 
     /**
      * Event Q is empty.
+     * 
+     * Calling destroy interrupts the processor thread.
      */
     public synchronized void destroy()
     {
@@ -281,6 +290,9 @@
     }
 
     /**
+     * This adds a remove all event to the queue. When it is processed, all
+     * elements will be removed from the cache.
+     * 
      * @exception IOException
      */
     public synchronized void addRemoveAllEvent()
@@ -357,9 +369,15 @@
     /**
      * Returns the next cache event from the queue or null if there are no
      * events in the queue.
+     * <p>
+     * We have an empty node at the head and the tail. When we take an item from
+     * the queue we move the next node to the head and then clear the value from
+     * that node. This value is returned.
+     * <p>
+     * When the queue is empty the head node is the same as the tail node.
      * 
      * @return An event to process.
-     *  
+     * 
      */
     private AbstractCacheEvent take()
     {
@@ -450,7 +468,7 @@
         return stats;
     }
 
-    ///////////////////////////// Inner classes /////////////////////////////
+    // /////////////////////////// Inner classes /////////////////////////////
 
     private static class Node
     {
@@ -460,6 +478,8 @@
     }
 
     /**
+     * This is the thread that works the queue.
+     * 
      * @author asmuts
      * @created January 15, 2002
      */
@@ -491,18 +511,18 @@
          */
         public void run()
         {
-            AbstractCacheEvent r = null;
+            AbstractCacheEvent event = null;
 
             while ( queue.isAlive() )
             {
-                r = queue.take();
+                event = queue.take();
 
                 if ( log.isDebugEnabled() )
                 {
-                    log.debug( "Event from queue = " + r );
+                    log.debug( "Event from queue = " + event );
                 }
 
-                if ( r == null )
+                if ( event == null )
                 {
                     synchronized ( queueLock )
                     {
@@ -515,21 +535,21 @@
                             log.warn( "Interrupted while waiting for another event to come in before we die." );
                             return;
                         }
-                        r = queue.take();
+                        event = queue.take();
                         if ( log.isDebugEnabled() )
                         {
-                            log.debug( "Event from queue after sleep = " + r );
+                            log.debug( "Event from queue after sleep = " + event );
                         }
                     }
-                    if ( r == null )
+                    if ( event == null )
                     {
                         queue.stopProcessing();
                     }
                 }
 
-                if ( queue.isWorking() && queue.isAlive() && r != null )
+                if ( queue.isWorking() && queue.isAlive() && event != null )
                 {
-                    r.run();
+                    event.run();
                 }
             }
             if ( log.isInfoEnabled() )
@@ -593,6 +613,8 @@
                     {
                         log.warn( "Interrupted while sleeping for retry on event " + this + "." );
                     }
+                    // TODO consider if this is best. Maybe we should just
+                    // destroy.
                     setWorking( false );
                     setAlive( false );
                 }
@@ -607,6 +629,8 @@
     }
 
     /**
+     * An element should be put in the cache.
+     * 
      * @author asmuts
      * @created January 15, 2002
      */
@@ -626,10 +650,6 @@
             throws IOException
         {
             this.ice = ice;
-            /*
-             * this.key = key; this.obj = CacheUtils.dup(obj); this.attr = attr;
-             * this.groupName = groupName;
-             */
         }
 
         /**
@@ -640,13 +660,12 @@
         protected void doRun()
             throws IOException
         {
-            /*
-             * CacheElement ce = new CacheElement(cacheName, key, obj);
-             * ce.setElementAttributes( attr ); ce.setGroupName( groupName );
-             */
             listener.handlePut( ice );
         }
 
+        /**
+         * For debugging.
+         */
         public String toString()
         {
             return new StringBuffer( "PutEvent for key: " ).append( ice.getKey() ).append( " value: " )
@@ -656,7 +675,7 @@
     }
 
     /**
-     * Description of the Class
+     * An element should be removed from the cache.
      * 
      * @author asmuts
      * @created January 15, 2002
@@ -702,7 +721,8 @@
     }
 
     /**
-     * Description of the Class
+     * All elements should be removed from the cache when this event is
+     * processed.
      * 
      * @author asmuts
      * @created January 15, 2002
@@ -735,7 +755,7 @@
     }
 
     /**
-     * Description of the Class
+     * The cache should be disposed when this event is processed.
      * 
      * @author asmuts
      * @created January 15, 2002
@@ -770,6 +790,10 @@
     }
 
     /**
+     * This means that the queue is functional. If we reach the max number of
+     * failures, the queue is marked as non-functional and will never work
+     * again.
+     * 
      * @param b
      */
     public void setWorking( boolean b )
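
The new take() javadoc above describes the queue layout: an empty node sits at
the head, and taking an item moves the next node into the head position and
clears its value. Here is a minimal, self-contained sketch of that pattern,
with a hypothetical class name (SketchQueue), not the actual JCS source:

// Illustrative sketch of the empty-head-node queue described in the
// take() javadoc above. Names are hypothetical, not from the JCS source.
public class SketchQueue
{
    private static class Node
    {
        Node next;

        Object value;
    }

    // The head is always an empty node; when head == tail the queue is empty.
    private Node head = new Node();

    private Node tail = head;

    private final Object queueLock = new Object();

    public void add( Object value )
    {
        Node node = new Node();
        node.value = value;
        synchronized ( queueLock )
        {
            tail.next = node;
            tail = node;
        }
    }

    // Returns the next value, or null if the queue is empty. The node that
    // held the value becomes the new empty head.
    public Object take()
    {
        synchronized ( queueLock )
        {
            if ( head == tail )
            {
                return null;
            }
            Node node = head.next;
            Object value = node.value;
            node.value = null;
            head = node;
            return value;
        }
    }
}

Keeping a permanently empty head node means neither add nor take has to
special-case the first element beyond the head == tail emptiness check.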

Modified: jakarta/jcs/trunk/src/java/org/apache/jcs/engine/memory/mru/MRUMemoryCache.java
URL: http://svn.apache.org/viewcvs/jakarta/jcs/trunk/src/java/org/apache/jcs/engine/memory/mru/MRUMemoryCache.java?rev=378644&r1=378643&r2=378644&view=diff
==============================================================================
--- jakarta/jcs/trunk/src/java/org/apache/jcs/engine/memory/mru/MRUMemoryCache.java (original)
+++ jakarta/jcs/trunk/src/java/org/apache/jcs/engine/memory/mru/MRUMemoryCache.java Fri Feb 17 14:33:31 2006
@@ -38,11 +38,13 @@
 import org.apache.jcs.engine.stats.behavior.IStats;
 
 /**
- * A SLOW AS HELL reference management system. The most recently used items move
+ * A SLOW reference management system. The most recently used items move
  * to the front of the list and get spooled to disk if the cache hub is
  * configured to use a disk cache.
+ * <p>
+ * This class is mainly for testing the hub. It also shows that using the
+ * Collections LinkedList is far slower than JCS' own doubly linked list.
  * 
- * @version $Id$
  */
 public class MRUMemoryCache
     extends AbstractMemoryCache
@@ -75,7 +77,7 @@
     public synchronized void initialize( CompositeCache hub )
     {
         super.initialize( hub );
-        log.info( "initialized MRUMemoryCache for " + cacheName );
+        log.info( "Initialized MRUMemoryCache for " + cacheName );
     }
 
     /**
@@ -118,13 +120,16 @@
         }
         // SPOOL LAST -- need to make this a grouping in a queue
 
-        log.debug( "In RAM overflow" );
+        if ( log.isDebugEnabled() )
+        {
+            log.debug( "In RAM overflow" );
+        }
 
         // write the last item to disk.
         try
         {
-
-            // PUSH 5 TO DISK TO MINIMIZE THE TYPICAL
+            // Push more than one item to disk to minimize the typical spool
+            // at each put.
             int chunkSizeCorrected = Math.min( size, chunkSize );
 
             if ( log.isDebugEnabled() )
@@ -139,18 +144,19 @@
             // and wouldn't save much time in this synchronous call.
             for ( int i = 0; i < chunkSizeCorrected; i++ )
             {
-                // Might want to rename this "overflow" incase the hub
-                // wants to do something else.
-                Serializable last = (Serializable) mrulist.getLast();
-                ICacheElement ceL = (ICacheElement) map.get( last );
-                cache.spoolToDisk( ceL );
 
+                ICacheElement toSpool = null;
+                
                 // need a more fine grained locking here
-                synchronized ( map )
+                synchronized ( lockMe )
                 {
+                    Serializable last = (Serializable) mrulist.removeLast();
+                    toSpool = (ICacheElement) map.get( last );
                     map.remove( last );
-                    mrulist.remove( last );
                 }
+                // Might want to rename this "overflow" in case the hub
+                // wants to do something else.
+                cache.spoolToDisk( toSpool );
             }
 
             if ( log.isDebugEnabled() )
@@ -163,7 +169,7 @@
         catch ( Exception ex )
         {
             // impossible case.
-            ex.printStackTrace();
+            log.error( "Problem updating MRU.", ex );
             throw new IllegalStateException( ex.getMessage() );
         }
     }
@@ -184,7 +190,6 @@
 
         try
         {
-
             ce = (ICacheElement) map.get( key );
             if ( ce != null )
             {
@@ -192,24 +197,24 @@
                 {
                     log.debug( cacheName + ": MRUMemoryCache quiet hit for " + key );
                 }
-
             }
             else
             {
                 log.debug( cacheName + ": MRUMemoryCache quiet miss for " + key );
             }
-
         }
         catch ( Exception e )
         {
-            log.error( e );
+            log.error( "Problem getting quietly from MRU.", e );
         }
 
         return ce;
     }
 
+       
     /**
-     * Description of the Method
+     * Gets an item out of the map. If it finds an item, it is removed from the
+     * list and then added to the first position in the linked list.
      * 
      * @return
      * @param key
@@ -223,7 +228,6 @@
 
         try
         {
-
             if ( log.isDebugEnabled() )
             {
                 log.debug( "get> key=" + key );
@@ -246,11 +250,10 @@
                     log.debug( cacheName + " -- RAM-HIT for " + key );
                 }
             }
-
         }
         catch ( Exception e )
         {
-            log.error( e );
+            log.error( "Problem getting element.", e );
         }
 
         try
@@ -289,8 +292,6 @@
         return ce;
     }
 
-    // end get
-
     /**
      * Removes an item from the cache.
      * 
@@ -304,17 +305,15 @@
         if ( log.isDebugEnabled() )
         {
             log.debug( "remove> key=" + key );
-            //+, nonLocal="+nonLocal);
         }
 
-        //p("remove> key="+key+", nonLocal="+nonLocal);
         boolean removed = false;
 
         // handle partial removal
         if ( key instanceof String && key.toString().endsWith( CacheConstants.NAME_COMPONENT_DELIMITER ) )
         {
             // remove all keys of the same name hierarchy.
-            synchronized ( map )
+            synchronized ( lockMe )
             {
                 for ( Iterator itr = map.entrySet().iterator(); itr.hasNext(); )
                 {
@@ -323,8 +322,8 @@
                     if ( k instanceof String && k.toString().startsWith( key.toString() ) )
                     {
                         itr.remove();
-                        Serializable keyR = (ICacheElement) entry.getKey();
-                        map.remove( keyR );
+                        Serializable keyR = (Serializable) entry.getKey();
+                        //map.remove( keyR );
                         mrulist.remove( keyR );
                         removed = true;
                     }
@@ -334,7 +333,7 @@
         else if ( key instanceof GroupId )
         {
             // remove all keys of the same name hierarchy.
-            synchronized ( map )
+            synchronized ( lockMe )
             {
                 for ( Iterator itr = map.entrySet().iterator(); itr.hasNext(); )
                 {
@@ -357,7 +356,6 @@
             {
                 synchronized ( lockMe )
                 {
-
                     map.remove( key );
                     mrulist.remove( key );
                 }
@@ -375,9 +373,9 @@
      */
     public Object[] getKeyArray()
     {
+        // do we need to lock the map here?
         synchronized ( lockMe )
         {
-            // may need to lock to map here?
             return map.keySet().toArray();
         }
     }
@@ -390,7 +388,7 @@
         log.debug( "dumpingMap" );
         for ( Iterator itr = map.entrySet().iterator(); itr.hasNext(); )
         {
-            //for ( Iterator itr = memCache.getIterator(); itr.hasNext();) {
+            // for ( Iterator itr = memCache.getIterator(); itr.hasNext();) {
             Map.Entry e = (Map.Entry) itr.next();
             ICacheElement ce = (ICacheElement) e.getValue();
             log.debug( "dumpMap> key=" + e.getKey() + ", val=" + ce.getVal() );

Modified: jakarta/jcs/trunk/xdocs/IndexedDiskAuxCache.xml
URL: http://svn.apache.org/viewcvs/jakarta/jcs/trunk/xdocs/IndexedDiskAuxCache.xml?rev=378644&r1=378643&r2=378644&view=diff
==============================================================================
--- jakarta/jcs/trunk/xdocs/IndexedDiskAuxCache.xml (original)
+++ jakarta/jcs/trunk/xdocs/IndexedDiskAuxCache.xml Fri Feb 17 14:33:31 2006
@@ -1,141 +1,174 @@
 <?xml version="1.0"?>
 
 <document>
-  <properties>
-    <title>Indexed Disk Auxiliary Cache</title>
-    <author email="ASmuts@yahoo.com">Aaron Smuts</author>
-  </properties>
-
-  <body>
-    <section name="Indexed Disk Auxiliary Cache">
-      <p> 
-        The Indexed Disk Auxiliary Cache is an optional plugin for the
-        JCS.  It is primarily intended to provide a secondary store to 
-        ease the memory burden of the cache.  When the memory cache 
-  	    exceeds its maximum size it tells the cache hub that the item
-	      to be removed from memory should be spooled to disk.  The cache 
-	      checks to see if any auxiliaries of type "disk" have been 
-	      configured for the region.  If the "Indexed Disk Auxiliary Cache"  
-	      is used, the item will be spooled to disk.
-      </p>
-
-      <subsection name="Disk Indexing">
-      <p>
-        The Indexed Disk Auxiliary Cache follows the fastest 
-    	  pattern of disk caching.  Items are stored at the end of a file 
-	      dedicated to the cache region.  The first byte of each disk entry
-	      specifies the length of the entry.  The start position in the file
-	      is saved in memory, referenced by the item's key.  Though this still
-	      requires memory, it is insignificant given the performance trade 
-	      off.  Depending on the key size, 500,000 disk entries will probably 
-  	    only require about 1 MB of memory.  Locating the position of an item is 
-	      as fast as a map lookup and the retrieval of the item only requires 2
-    	  disk accesses.
-      </p>
-      <p>
-	  When items are removed from the disk cache, the location of the available
-	  block on the storage file is recorded in a sorted preferential array of a 
-	  size not to exceed the maximum number of keys allowed in memory.  This allows
-	  the disk cache to reuse empty spots, thereby keeping the file size to a 
-	  minimum.
-      </p>
-      </subsection>
-
-      <subsection name="Purgatory">
-      <p>
-	      Writing to the disk cache is asynchronous and made efficient by using a 
-        memory staging area called purgatory.  Retrievals check purgatory then
-     	  disk for an item.  When items are sent to purgatory they are simultaneously 
-        queued to be put to disk.  If an item is retrieved from purgatory it will no
-        longer be written to disk, since the cache hub will move it back to memory.  
-        Using purgatory insures that there is no wait for disk writes, unecessary 
-        disk writes are avoided for borderline items, and the items are always 
-        available.  		
-      </p>
-      </subsection>
-
-      <subsection name="Persistence">
-      <p>
-	      When the disk cache is properly shutdown, the memory index is written
-        to disk and the value file is defragmented.  When the cache starts
-        up, the disk cache can be configured to read or delete the index file.  This 
-        provides an unreliable persistence mechanism.   
-      </p>
-      </subsection>
-
-      <subsection name="Configuration">
-        <p>
-          The simple configuration and is done in the auxiliary 
-          cache section of the <code>cache.ccf</code> configuration file.
-          In the example below, I created an Indexed Disk Auxiliary Cache 
-          referenced by <code>DC</code>.  It uses files located in the 
-          "DiskPath" directory.
-         </p>
-	   <p>
-	      The Disk indexes are equipped with an LRU storage limit.  The maximum 
-          number of keys is configured by the maxKeySize parameter.  If the
-          maximum key size is less than 0, no limit will be placed on the
-          number of keys.  By default, the max key size is 5000.
-	   </p>
-        <source><![CDATA[
+	<properties>
+		<title>Indexed Disk Auxiliary Cache</title>
+		<author email="ASmuts@apache.org">Aaron Smuts</author>
+	</properties>
+
+	<body>
+		<section name="Indexed Disk Auxiliary Cache">
+			<p>
+				The Indexed Disk Auxiliary Cache is an optional plugin
+				for the JCS. It is primarily intended to provide a
+				secondary store to ease the memory burden of the cache.
+				When the memory cache exceeds its maximum size it tells
+				the cache hub that the item to be removed from memory
+				should be spooled to disk. The cache checks to see if
+				any auxiliaries of type "disk" have been configured for
+				the region. If the "Indexed Disk Auxiliary Cache" is
+				used, the item will be spooled to disk.
+			</p>
+
+			<subsection name="Disk Indexing">
+				<p>
+					The Indexed Disk Auxiliary Cache follows the fastest
+					pattern of disk caching. Items are stored at the end
+					of a file dedicated to the cache region. The first
+					byte of each disk entry specifies the length of the
+					entry. The start position in the file is saved in
+					memory, referenced by the item's key. Though this
+					still requires memory, it is insignificant given the
+					performance trade off. Depending on the key size,
+					500,000 disk entries will probably only require
+					about 1 MB of memory. Locating the position of an
+					item is as fast as a map lookup and the retrieval of
+					the item only requires 2 disk accesses.
+				</p>
+				<p>
+					When items are removed from the disk cache, the
+					location of the available block on the storage file
+					is recorded in a sorted preferential array of a size
+					not to exceed the maximum number of keys allowed in
+					memory. This allows the disk cache to reuse empty
+					spots, thereby keeping the file size to a minimum.
+				</p>
+			</subsection>
+
+			<subsection name="Purgatory">
+				<p>
+					Writing to the disk cache is asynchronous and made
+					efficient by using a memory staging area called
+					purgatory. Retrievals check purgatory then disk for
+					an item. When items are sent to purgatory they are
+					simultaneously queued to be put to disk. If an item
+					is retrieved from purgatory it will no longer be
+					written to disk, since the cache hub will move it
+					back to memory. Using purgatory ensures that there
+					is no wait for disk writes, unnecessary disk writes
+					are avoided for borderline items, and the items are
+					always available.
+				</p>
+			</subsection>
+
+			<subsection name="Persistence">
+				<p>
+					When the disk cache is properly shutdown, the memory
+					index is written to disk and the value file is
+					defragmented. When the cache starts up, the disk
+					cache can be configured to read or delete the index
+					file. This provides an unreliable persistence
+					mechanism.
+				</p>
+			</subsection>
+
+			<subsection name="Configuration">
+				<p>
+					The simple configuration is done in the
+					auxiliary cache section of the
+					<code>cache.ccf</code>
+					configuration file. In the example below, I created
+					an Indexed Disk Auxiliary Cache referenced by
+					<code>DC</code>
+					. It uses files located in the "DiskPath" directory.
+				</p>
+				<p>
+					The Disk indexes are equipped with an LRU storage
+					limit. The maximum number of keys is configured by
+					the maxKeySize parameter. If the maximum key size is
+					less than 0, no limit will be placed on the number
+					of keys. By default, the max key size is 5000.
+				</p>
+				<source>
+					<![CDATA[
 jcs.auxiliary.DC=
     org.apache.jcs.auxiliary.disk.indexed.IndexedDiskCacheFactory
 jcs.auxiliary.DC.attributes=
     org.apache.jcs.auxiliary.disk.indexed.IndexedDiskCacheAttributes
 jcs.auxiliary.DC.attributes.DiskPath=g:\dev\jakarta-turbine-stratum\raf
 jcs.auxiliary.DC.attributes.MaxKeySize=100000
-        ]]></source>
-      </subsection>
+        ]]>
+				</source>
+			</subsection>
 
-      <subsection name="Additional Configuration Options">
-        <p>
-          The indexed disk cache provides some additional configuration options.
-	   </p>
-	   <p>
-	    The purgatory size of the Disk cache is equipped with an LRU storage limit.
-	    The maximum number of elements allowed in purgatory is configured by the 
-	    MaxPurgatorySize parameter.  By default, the max purgatory size is 5000.
-	   </p>
-	   <p>
-	    Initial testing indicates that the disk cache performs better when the 
-	    key and purgatory sizes are limited.
-	   </p>
-        <source><![CDATA[
+			<subsection name="Additional Configuration Options">
+				<p>
+					The indexed disk cache provides some additional
+					configuration options.
+				</p>
+				<p>
+					The purgatory size of the Disk cache is equipped
+					with an LRU storage limit. The maximum number of
+					elements allowed in purgatory is configured by the
+					MaxPurgatorySize parameter. By default, the max
+					purgatory size is 5000.
+				</p>
+				<p>
+					Initial testing indicates that the disk cache
+					performs better when the key and purgatory sizes are
+					limited.
+				</p>
+				<source>
+					<![CDATA[
 jcs.auxiliary.DC.attributes.MaxPurgatorySize=10000
-        ]]></source>
-	   <p>
-	    Slots in the data file become empty when items are removed from the disk cache.
-	    The indexed disk cache keeps track of empty slots in the data file, so they can
-	    be reused.  The slot locations are stored in a sorted preferential array --
-	    the recycle bin.  The smallest items are removed from the recycle bin when it 
-	    reaches the specified limit.  The MaxRecycleBinSize cannot be larger than
-	    the MaxKeySize.  If the MaxKeySize is less than 0, the recycle bin will default 
-	    to 5000.  
-	   </p>
-        <source><![CDATA[
+        ]]>
+				</source>
+				<p>
+					Slots in the data file become empty when items are
+					removed from the disk cache. The indexed disk cache
+					keeps track of empty slots in the data file, so they
+					can be reused. The slot locations are stored in a
+					sorted preferential array -- the recycle bin. The
+					smallest items are removed from the recycle bin when
+					it reaches the specified limit. The
+					MaxRecycleBinSize cannot be larger than the
+					MaxKeySize. If the MaxKeySize is less than 0, the
+					recycle bin will default to 5000.
+				</p>
+				<source>
+					<![CDATA[
 jcs.auxiliary.DC.attributes.MaxRecycleBinSize=10000
-        ]]></source>
-	   <p>
-	    The Disk cache can be configured to defragment the data file at runtime.  Since
-	    defragmentation is only necessary if items have been removed, the deframentation
-	    interval is determined by the number of removes.  Currently there is no way
-	    to schedule defragmentation to run at a set time.  If you set the OptimizeAtRemoveCount
-	    to -1, no optimizations of the data file will occur until shutdown.  By default
-	    the value is -1.
-	   </p>
-        <source><![CDATA[
+        ]]>
+				</source>
+				<p>
+					The Disk cache can be configured to defragment the
+					data file at runtime. Since defragmentation is only
+					necessary if items have been removed, the
+					defragmentation interval is determined by the number
+					of removes. Currently there is no way to schedule
+					defragmentation to run at a set time. If you set the
+					OptimizeAtRemoveCount to -1, no optimizations of the
+					data file will occur until shutdown. By default the
+					value is -1.
+				</p>
+				<source>
+					<![CDATA[
 jcs.auxiliary.DC.attributes.OptimizeAtRemoveCount=30000
-        ]]></source>
-      </subsection>
+        ]]>
+				</source>
+			</subsection>
 
-      <subsection name="A Complete Configuration Example">
-        <p>
-          In this sample cache.ccf file, I configured the cache to use a disk cache, 
-          called DC, by default.  Also, I explicitly set a cache region called myRegion1
-          to use DC.  I specified custom settings for all of the Indexed Disk Cache
-          configuration parameters. 
-	   </p>      
-        <source><![CDATA[        
+			<subsection name="A Complete Configuration Example">
+				<p>
+					In this sample cache.ccf file, I configured the
+					cache to use a disk cache, called DC, by default.
+					Also, I explicitly set a cache region called
+					myRegion1 to use DC. I specified custom settings for
+					all of the Indexed Disk Cache configuration
+					parameters.
+				</p>
+				<source>
+					<![CDATA[        
 ##############################################################
 ##### Default Region Configuration
 jcs.default=DC
@@ -160,15 +193,80 @@
 jcs.auxiliary.DC.attributes.MaxKeySize=10000
 jcs.auxiliary.DC.attributes.OptimizeAtRemoveCount=300000
 jcs.auxiliary.DC.attributes.MaxRecycleBinSize=7500
-        ]]></source>
-      </subsection>
+        ]]>
+				</source>
+			</subsection>
+
+			<subsection name="Using Thread Pools to Reduce Threads">
+				<p>
+					The Indexed Disk Cache allows you to use fewer
+					threads than active regions. By default the disk
+					cache will use the standard cache event queue which
+					has a dedicated thread. Although the standard queue
+					kills its worker thread after a minute of
+					inactivity, you may want to restrict the total
+					number of threads. You can accomplish this by using
+					a pooled event queue.
+				</p>
+				<p>
+					The configuration file below defines a disk cache
+					called DC2. It uses an event queue of type POOLED.
+					The queue is named disk_cache_event_queue. The
+					disk_cache_event_queue is defined in the bottom of
+					the file.
+				</p>
+				<source>
+					<![CDATA[ 
+##############################################################
+################## DEFAULT CACHE REGION  #####################
+# sets the default aux value for any non configured caches
+jcs.default=DC2
+jcs.default.cacheattributes=org.apache.jcs.engine.CompositeCacheAttributes
+jcs.default.cacheattributes.MaxObjects=200001
+jcs.default.cacheattributes.MemoryCacheName=org.apache.jcs.engine.memory.lru.LRUMemoryCache
+jcs.default.cacheattributes.UseMemoryShrinker=false
+jcs.default.cacheattributes.MaxMemoryIdleTimeSeconds=3600
+jcs.default.cacheattributes.ShrinkerIntervalSeconds=60
+jcs.default.elementattributes=org.apache.jcs.engine.ElementAttributes
+jcs.default.elementattributes.IsEternal=false
+jcs.default.elementattributes.MaxLifeSeconds=700
+jcs.default.elementattributes.IdleTime=1800
+jcs.default.elementattributes.IsSpool=true
+jcs.default.elementattributes.IsRemote=true
+jcs.default.elementattributes.IsLateral=true
+
+##############################################################
+################## AUXILIARY CACHES AVAILABLE ################
+
+# Disk Cache Using a Pooled Event Queue -- this allows you
+# to control the maximum number of threads it will use.
+# Each region uses 1 thread by default in the SINGLE model.
+# Adding more threads than regions does not help performance.
+# If you want to use a separate pool for each disk cache, either use
+# the single model or define a different auxiliary for each region and use the Pooled type.
+# SINGLE is generally best unless you have a huge # of regions.
+jcs.auxiliary.DC2=org.apache.jcs.auxiliary.disk.indexed.IndexedDiskCacheFactory
+jcs.auxiliary.DC2.attributes=org.apache.jcs.auxiliary.disk.indexed.IndexedDiskCacheAttributes
+jcs.auxiliary.DC2.attributes.DiskPath=target/test-sandbox/raf
+jcs.auxiliary.DC2.attributes.MaxPurgatorySize=10000
+jcs.auxiliary.DC2.attributes.MaxKeySize=10000
+jcs.auxiliary.DC2.attributes.MaxRecycleBinSize=5000
+jcs.auxiliary.DC2.attributes.OptimizeAtRemoveCount=300000
+jcs.auxiliary.DC2.attributes.EventQueueType=POOLED
+jcs.auxiliary.DC2.attributes.EventQueuePoolName=disk_cache_event_queue
+
+##############################################################
+################## OPTIONAL THREAD POOL CONFIGURATION ########
 
-       <subsection name="TODO">
-        <p>
-          The Indexed Disk Auxiliary Cache will eventually be equiped 
-          with periodic index storage. 
-        </p>
-      </subsection>
-    </section>
-  </body>
+# Disk Cache Event Queue Pool
+thread_pool.disk_cache_event_queue.useBoundary=false
+thread_pool.disk_cache_event_queue.maximumPoolSize=15
+thread_pool.disk_cache_event_queue.minimumPoolSize=1
+thread_pool.disk_cache_event_queue.keepAliveTime=3500
+thread_pool.disk_cache_event_queue.startUpSize=1
+        ]]>
+				</source>
+			</subsection>
+		</section>
+	</body>
 </document>
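
The "Disk Indexing" subsection above describes the layout: each entry is
appended to the region's data file behind a length header, and only the
entry's file offset is kept in memory, keyed by the item's key, so a get
costs one map lookup plus two disk accesses. A rough sketch of that read
path follows, assuming a 4-byte int length header and a hypothetical class
name (IndexedReadSketch); the real IndexedDiskCache differs in detail:

import java.io.ByteArrayInputStream;
import java.io.ObjectInputStream;
import java.io.RandomAccessFile;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the indexed read path; not the JCS implementation.
public class IndexedReadSketch
{
    // key -> offset of the entry in the data file. This is the only
    // per-entry memory cost, which is why 500,000 keys stay around 1 MB.
    private final Map index = new HashMap();

    private final RandomAccessFile dataFile;

    public IndexedReadSketch( String path ) throws Exception
    {
        dataFile = new RandomAccessFile( path, "rw" );
    }

    // Appends the serialized entry at the end of the file behind a
    // length header and records its start position in the index.
    public void put( Serializable key, byte[] serialized ) throws Exception
    {
        long offset = dataFile.length();
        dataFile.seek( offset );
        dataFile.writeInt( serialized.length );
        dataFile.write( serialized );
        index.put( key, new Long( offset ) );
    }

    // One map lookup, then two disk reads: the length header and the payload.
    public Object get( Serializable key ) throws Exception
    {
        Long offset = (Long) index.get( key );
        if ( offset == null )
        {
            return null;
        }
        dataFile.seek( offset.longValue() );
        int length = dataFile.readInt();
        byte[] buffer = new byte[length];
        dataFile.readFully( buffer );
        ObjectInputStream in = new ObjectInputStream( new ByteArrayInputStream( buffer ) );
        return in.readObject();
    }
}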

Modified: jakarta/jcs/trunk/xdocs/JCSvsEHCache.xml
URL: http://svn.apache.org/viewcvs/jakarta/jcs/trunk/xdocs/JCSvsEHCache.xml?rev=378644&r1=378643&r2=378644&view=diff
==============================================================================
--- jakarta/jcs/trunk/xdocs/JCSvsEHCache.xml (original)
+++ jakarta/jcs/trunk/xdocs/JCSvsEHCache.xml Fri Feb 17 14:33:31 2006
@@ -7,189 +7,217 @@
 	</properties>
 
 	<body>
-		<section name="Initial Results">
-			<p>
-				I just built both EHCache (1.2-beta4) and JCS (1.2.7.0)
-				from head, configured both similarly and ran 20 rounds
-				of 50,000 puts and gets, that is 1,000,000 puts and gets
-				in total. Using the default LRU Memory Cache, the same
-				algorithm that EHCache uses by default,
-				<b>JCS proved to be nearly twice as fast as EHCache</b>
-				in multiple trials for both puts and gets. I have the
-				log levels for both set at info. I would like to further
-				verify my results, since they completely contradict the
-				information on the EHCache site.
-			</p>
-			<p>
-				From what I can tell so far, JCS is significantly faster
-				than EHCache when you are retrieving items that exist in
-				the cache and when you are putting items into a cache
-				that has not reached its size limit.
-			</p>
-			<p>
-				Additional testing shows that when the size limit it
-				reached, JCS and EHCache perform similarly for puts and
-				gets. Although JCS gets are significantly faster when
-				the items are present, they are almost exactly the same
-				when the items are not in the cache. My initial tests
-				revealed a less than 1% difference, but subsequent runs
-				showed JCS as 20% faster. More tests are needed before
-				the results are conclusive.
-			</p>
-			<p>
-				Since, neither cache will be a relevant bottleneck in
-				any application where a cache would be useful, the
-				differences in performance may be beside the point.
-				Nevertheless, it is important to note that the EHCache
-				web site provides, what appears to be, false test data.
-			</p>
-			<p>
-				The peculiar result is that a few years back EHCache
-				took the JCS source code, removed most of its features,
-				and ended up with something that performs worse.
-			</p>
-		</section>
+		<section name="JCS vs EHCache Memory Performance">
+			<subsection name="Initial Test Results">
+				<p>
+					I just built both EHCache (1.2-beta4) and JCS
+					(1.2.7.0) from head, configured both similarly and
+					ran 20 rounds of 50,000 puts and gets, that is
+					1,000,000 puts and gets in total. Using the default
+					LRU Memory Cache, the same algorithm that EHCache
+					uses by default,
+					<b>
+						JCS proved to be nearly twice as fast as EHCache
+					</b>
+					in multiple trials for both puts and gets. I have
+					the log levels for both set at info. I would like to
+					further verify my results, since they completely
+					contradict the information on the EHCache site.
+				</p>
+				<p>
+					From what I can tell so far, JCS is significantly
+					faster than EHCache when you are retrieving items
+					that exist in the cache and when you are putting
+					items into a cache that has not reached its size
+					limit.
+				</p>
+				<p>
+					Additional testing shows that when the size limit is
+					reached, JCS and EHCache perform similarly for puts
+					and gets. Although JCS gets are significantly faster
+					when the items are present, they are almost exactly
+					the same when the items are not in the cache. My
+					initial tests revealed a less than 1% difference,
+					but subsequent runs showed JCS as 20% faster. More
+					tests are needed before the results are conclusive.
+				</p>
+				<p>
+					Since neither cache will be a relevant bottleneck
+					in any application where a cache would be useful,
+					the differences in performance may be beside the
+					point. Nevertheless, it is important to note that
+					the EHCache web site provides what appears to be
+					false test data.
+				</p>
+				<p>
+					The peculiar result is that a few years back EHCache
+					took the JCS source code, removed most of its
+					features, and ended up with something that performs
+					worse.
+				</p>
+			</subsection>
+
+
+			<subsection name="Test Data">
+				<p>Here is the data from the first test:</p>
+				<p>
+					JCS put time for 50000 = 651; millis per = 0.01302
+					JCS get time for 50000 = 160; millis per = 0.0032
+					EHCache put time for 50000 = 481; millis per =
+					0.00962 EHCache get time for 50000 = 110; millis per
+					= 0.0022
+				</p>
+				<p>
+					JCS put time for 50000 = 240; millis per = 0.0048
+					JCS get time for 50000 = 90; millis per = 0.0018
+					EHCache put time for 50000 = 491; millis per =
+					0.00982 EHCache get time for 50000 = 120; millis per
+					= 0.0024
+				</p>
+				<p>
+					JCS put time for 50000 = 241; millis per = 0.00482
+					JCS get time for 50000 = 80; millis per = 0.0016
+					EHCache put time for 50000 = 551; millis per =
+					0.01102 EHCache get time for 50000 = 110; millis per
+					= 0.0022
+				</p>
+				<p>
+					JCS put time for 50000 = 240; millis per = 0.0048
+					JCS get time for 50000 = 90; millis per = 0.0018
+					EHCache put time for 50000 = 481; millis per =
+					0.00962 EHCache get time for 50000 = 130; millis per
+					= 0.0026
+				</p>
+				<p>
+					JCS put time for 50000 = 230; millis per = 0.0046
+					JCS get time for 50000 = 181; millis per = 0.00362
+					EHCache put time for 50000 = 520; millis per =
+					0.0104 EHCache get time for 50000 = 101; millis per
+					= 0.00202
+				</p>
+				<p>
+					JCS put time for 50000 = 220; millis per = 0.0044
+					JCS get time for 50000 = 90; millis per = 0.0018
+					EHCache put time for 50000 = 641; millis per =
+					0.01282 EHCache get time for 50000 = 110; millis per
+					= 0.0022
+				</p>
+				<p>
+					JCS put time for 50000 = 250; millis per = 0.0050
+					JCS get time for 50000 = 121; millis per = 0.00242
+					EHCache put time for 50000 = 590; millis per =
+					0.0118 EHCache get time for 50000 = 101; millis per
+					= 0.00202
+				</p>
+				<p>
+					JCS put time for 50000 = 260; millis per = 0.0052
+					JCS get time for 50000 = 100; millis per = 0.0020
+					EHCache put time for 50000 = 581; millis per =
+					0.01162 EHCache get time for 50000 = 100; millis per
+					= 0.0020
+				</p>
+				<p>
+					JCS put time for 50000 = 290; millis per = 0.0058
+					JCS get time for 50000 = 121; millis per = 0.00242
+					EHCache put time for 50000 = 570; millis per =
+					0.0114 EHCache get time for 50000 = 121; millis per
+					= 0.00242
+				</p>
+				<p>
+					JCS put time for 50000 = 210; millis per = 0.0042
+					JCS get time for 50000 = 120; millis per = 0.0024
+					EHCache put time for 50000 = 561; millis per =
+					0.01122 EHCache get time for 50000 = 130; millis per
+					= 0.0026
+				</p>
+				<p>
+					JCS put time for 50000 = 250; millis per = 0.0050
+					JCS get time for 50000 = 151; millis per = 0.00302
+					EHCache put time for 50000 = 560; millis per =
+					0.0112 EHCache get time for 50000 = 111; millis per
+					= 0.00222
+				</p>
+				<p>
+					JCS put time for 50000 = 250; millis per = 0.0050
+					JCS get time for 50000 = 100; millis per = 0.0020
+					EHCache put time for 50000 = 711; millis per =
+					0.01422 EHCache get time for 50000 = 100; millis per
+					= 0.0020
+				</p>
+				<p>
+					JCS put time for 50000 = 251; millis per = 0.00502
+					JCS get time for 50000 = 90; millis per = 0.0018
+					EHCache put time for 50000 = 511; millis per =
+					0.01022 EHCache get time for 50000 = 90; millis per
+					= 0.0018
+				</p>
+				<p>
+					JCS put time for 50000 = 220; millis per = 0.0044
+					JCS get time for 50000 = 100; millis per = 0.0020
+					EHCache put time for 50000 = 491; millis per =
+					0.00982 EHCache get time for 50000 = 90; millis per
+					= 0.0018
+				</p>
+				<p>
+					JCS put time for 50000 = 230; millis per = 0.0046
+					JCS get time for 50000 = 80; millis per = 0.0016
+					EHCache put time for 50000 = 201; millis per =
+					0.00402 EHCache get time for 50000 = 390; millis per
+					= 0.0078
+				</p>
+				<p>
+					JCS put time for 50000 = 201; millis per = 0.00402
+					JCS get time for 50000 = 120; millis per = 0.0024
+					EHCache put time for 50000 = 180; millis per =
+					0.0036 EHCache get time for 50000 = 411; millis per
+					= 0.00822
+				</p>
+				<p>
+					JCS put time for 50000 = 210; millis per = 0.0042
+					JCS get time for 50000 = 100; millis per = 0.0020
+					EHCache put time for 50000 = 210; millis per =
+					0.0042 EHCache get time for 50000 = 381; millis per
+					= 0.00762
+				</p>
+				<p>
+					JCS put time for 50000 = 240; millis per = 0.0048
+					JCS get time for 50000 = 90; millis per = 0.0018
+					EHCache put time for 50000 = 211; millis per =
+					0.00422 EHCache get time for 50000 = 410; millis per
+					= 0.0082
+				</p>
+				<p>
+					JCS put time for 50000 = 221; millis per = 0.00442
+					JCS get time for 50000 = 80; millis per = 0.0016
+					EHCache put time for 50000 = 210; millis per =
+					0.0042 EHCache get time for 50000 = 411; millis per
+					= 0.00822
+				</p>
+				<p>
+					JCS put time for 50000 = 220; millis per = 0.0044
+					JCS get time for 50000 = 80; millis per = 0.0016
+					EHCache put time for 50000 = 190; millis per =
+					0.0038 EHCache get time for 50000 = 411; millis per
+					= 0.00822
+				</p>
+				<p>Finished 20 loops of 50000 gets and puts</p>
+				<p>
+					Put average for JCS = 256 Put average for EHCache =
+					447 JCS puts took 0.57270694 times the EHCache , the
+					goal is less than 1.0x
+				</p>
+				<p>
+					Get average for JCS = 107 Get average for EHCache =
+					196 JCS gets took 0.54591835 times the EHCache , the
+					goal is less than 1.0x
+				</p>
+			</subsection>
 
-		<section name="Test Data">
-			<p>Here is the data from the first test:</p>
-			<p>
-				JCS put time for 50000 = 651; millis per = 0.01302 JCS
-				get time for 50000 = 160; millis per = 0.0032 EHCache
-				put time for 50000 = 481; millis per = 0.00962 EHCache
-				get time for 50000 = 110; millis per = 0.0022
-			</p>
-			<p>
-				JCS put time for 50000 = 240; millis per = 0.0048 JCS
-				get time for 50000 = 90; millis per = 0.0018 EHCache put
-				time for 50000 = 491; millis per = 0.00982 EHCache get
-				time for 50000 = 120; millis per = 0.0024
-			</p>
-			<p>
-				JCS put time for 50000 = 241; millis per = 0.00482 JCS
-				get time for 50000 = 80; millis per = 0.0016 EHCache put
-				time for 50000 = 551; millis per = 0.01102 EHCache get
-				time for 50000 = 110; millis per = 0.0022
-			</p>
-			<p>
-				JCS put time for 50000 = 240; millis per = 0.0048 JCS
-				get time for 50000 = 90; millis per = 0.0018 EHCache put
-				time for 50000 = 481; millis per = 0.00962 EHCache get
-				time for 50000 = 130; millis per = 0.0026
-			</p>
-			<p>
-				JCS put time for 50000 = 230; millis per = 0.0046 JCS
-				get time for 50000 = 181; millis per = 0.00362 EHCache
-				put time for 50000 = 520; millis per = 0.0104 EHCache
-				get time for 50000 = 101; millis per = 0.00202
-			</p>
-			<p>
-				JCS put time for 50000 = 220; millis per = 0.0044 JCS
-				get time for 50000 = 90; millis per = 0.0018 EHCache put
-				time for 50000 = 641; millis per = 0.01282 EHCache get
-				time for 50000 = 110; millis per = 0.0022
-			</p>
-			<p>
-				JCS put time for 50000 = 250; millis per = 0.0050 JCS
-				get time for 50000 = 121; millis per = 0.00242 EHCache
-				put time for 50000 = 590; millis per = 0.0118 EHCache
-				get time for 50000 = 101; millis per = 0.00202
-			</p>
-			<p>
-				JCS put time for 50000 = 260; millis per = 0.0052 JCS
-				get time for 50000 = 100; millis per = 0.0020 EHCache
-				put time for 50000 = 581; millis per = 0.01162 EHCache
-				get time for 50000 = 100; millis per = 0.0020
-			</p>
-			<p>
-				JCS put time for 50000 = 290; millis per = 0.0058 JCS
-				get time for 50000 = 121; millis per = 0.00242 EHCache
-				put time for 50000 = 570; millis per = 0.0114 EHCache
-				get time for 50000 = 121; millis per = 0.00242
-			</p>
-			<p>
-				JCS put time for 50000 = 210; millis per = 0.0042 JCS
-				get time for 50000 = 120; millis per = 0.0024 EHCache
-				put time for 50000 = 561; millis per = 0.01122 EHCache
-				get time for 50000 = 130; millis per = 0.0026
-			</p>
-			<p>
-				JCS put time for 50000 = 250; millis per = 0.0050 JCS
-				get time for 50000 = 151; millis per = 0.00302 EHCache
-				put time for 50000 = 560; millis per = 0.0112 EHCache
-				get time for 50000 = 111; millis per = 0.00222
-			</p>
-			<p>
-				JCS put time for 50000 = 250; millis per = 0.0050 JCS
-				get time for 50000 = 100; millis per = 0.0020 EHCache
-				put time for 50000 = 711; millis per = 0.01422 EHCache
-				get time for 50000 = 100; millis per = 0.0020
-			</p>
-			<p>
-				JCS put time for 50000 = 251; millis per = 0.00502 JCS
-				get time for 50000 = 90; millis per = 0.0018 EHCache put
-				time for 50000 = 511; millis per = 0.01022 EHCache get
-				time for 50000 = 90; millis per = 0.0018
-			</p>
-			<p>
-				JCS put time for 50000 = 220; millis per = 0.0044 JCS
-				get time for 50000 = 100; millis per = 0.0020 EHCache
-				put time for 50000 = 491; millis per = 0.00982 EHCache
-				get time for 50000 = 90; millis per = 0.0018
-			</p>
-			<p>
-				JCS put time for 50000 = 230; millis per = 0.0046 JCS
-				get time for 50000 = 80; millis per = 0.0016 EHCache put
-				time for 50000 = 201; millis per = 0.00402 EHCache get
-				time for 50000 = 390; millis per = 0.0078
-			</p>
-			<p>
-				JCS put time for 50000 = 201; millis per = 0.00402 JCS
-				get time for 50000 = 120; millis per = 0.0024 EHCache
-				put time for 50000 = 180; millis per = 0.0036 EHCache
-				get time for 50000 = 411; millis per = 0.00822
-			</p>
-			<p>
-				JCS put time for 50000 = 210; millis per = 0.0042 JCS
-				get time for 50000 = 100; millis per = 0.0020 EHCache
-				put time for 50000 = 210; millis per = 0.0042 EHCache
-				get time for 50000 = 381; millis per = 0.00762
-			</p>
-			<p>
-				JCS put time for 50000 = 240; millis per = 0.0048 JCS
-				get time for 50000 = 90; millis per = 0.0018 EHCache put
-				time for 50000 = 211; millis per = 0.00422 EHCache get
-				time for 50000 = 410; millis per = 0.0082
-			</p>
-			<p>
-				JCS put time for 50000 = 221; millis per = 0.00442 JCS
-				get time for 50000 = 80; millis per = 0.0016 EHCache put
-				time for 50000 = 210; millis per = 0.0042 EHCache get
-				time for 50000 = 411; millis per = 0.00822
-			</p>
-			<p>
-				JCS put time for 50000 = 220; millis per = 0.0044 JCS
-				get time for 50000 = 80; millis per = 0.0016 EHCache put
-				time for 50000 = 190; millis per = 0.0038 EHCache get
-				time for 50000 = 411; millis per = 0.00822
-			</p>
-			<p>Finished 20 loops of 50000 gets and puts</p>
-			<p>
-				Put average for JCS = 256 Put average for EHCache = 447
-				JCS puts took 0.57270694 times the EHCache , the goal is
-				less than 1.0x
-			</p>
-			<p>
-				Get average for JCS = 107 Get average for EHCache = 196
-				JCS gets took 0.54591835 times the EHCache , the goal is
-				less than 1.0x
-			</p>
-		</section>
+			<subsection name="A Test Class">
+				<p>Here is the test class:</p>
 
-		<section name="A Test Class">
-			<p>Here is the test class:</p>
-
-			<source>
-				<![CDATA[
+				<source>
+					<![CDATA[
 package org.apache.jcs;
 
 import junit.framework.TestCase;
@@ -385,7 +413,77 @@
 }
 				        
         ]]>
-			</source>
+				</source>
+			</subsection>
+		</section>
+
+
+		<section name="JCS vs EHCache Disk Cache">
+			<p>
+				It is very difficult to compare the ehcache disk store
+				and the JCS Indexed Disk Cache.
+			</p>
+			<p>The JCS version is much more sophisticated.</p>
+			<p>
+				JCS puts items into a queue called purgatory. While they
+				are in this queue, they are still accessible. This queue
+				gets worked when items are in it. The number of threads
+				used in the system as a whole for disk caches is
+				configurable using the thread pool configuration options
+				in JCS. I could have 1000 regions and only use 3 threads
+				to work the disk queues. From what I can tell EH will
+				use 1 thread per region. This is worse than the JCS
+				default, which uses a queue that kills its threads when
+				they are not used. . . . and much worse than using JCS
+				with a thread pool.
+			</p>
+			<p>
+				The size of JCS purgatory is configurable, so you can
+				avoid catastrophe if something goes wrong with the queue
+				worker. EH doesn't have any such safety.
+			</p>
+			<p>
+				JCS limits the number of keys that can be kept for the
+				disk cache. EH cannot do this.
+			</p>
+			<p>
+				The ehcache disk version is very simple. It puts an
+				unlimited number of items in a temporary store. You can
+				easily fill this up and run out of memory. You can put
+				items into JCS purgatory faster than they can be gc'd,
+				but it is much more difficult. The EH store is then
+				flushed to disk every 200ms. While EH is flushing, the
+				entire disk cache blocks!
+			</p>
+			<p>
+				JCS disk cache is based on a continuous spooling model,
+				not a stop the world model like EH. In most cases the EH
+				model will work out, but not if you put a lot of big
+				items on disk at once. If you want an even distribution
+				of disk cache response times, then you should use JCS.
+			</p>
+			<p>
+				The EH disk store also seems to just keep growing. After
+				several tests, the size of the data file was 10 times
+				that of JCS and EH was taking 10 times as long.
+			</p>
+			<p>
+				You can saturate the EH version much more quickly, since
+				it will hold as many items as you can put in in 200 ms.
+			</p>
+			<p>
+				I tried with 100k and JCS could handle it, but EH died
+				with an out of memory exception.
+			</p>
+			<p>
+				EH cache developed its disk store in response to a bug
+				in the JCS version. This bug was fixed a few years ago .
+				. . The nice thing about JCS is that it is completely
+				pluggable. It would take about 30 minutes to plug a
+				different disk cache implementation into JCS if you so
+				pleased . . . .
+			</p>
 		</section>
+
 	</body>
 </document>
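
The diff above elides the body of the test class. For anyone who wants to
reproduce the numbers, here is a rough sketch of the timing loop implied by
the log lines in the Test Data section (assumed structure and region name;
the committed test also drives EHCache and computes the put and get ratios):

package org.apache.jcs;

import junit.framework.TestCase;

// Rough sketch of the timing loop; the committed test class may differ.
public class PutGetTimingSketch
    extends TestCase
{
    public void testPutGetTiming()
        throws Exception
    {
        int loops = 20;
        int tries = 50000;
        long putTotal = 0;
        long getTotal = 0;

        JCS cache = JCS.getInstance( "testCache" );

        for ( int loop = 0; loop < loops; loop++ )
        {
            long start = System.currentTimeMillis();
            for ( int i = 0; i < tries; i++ )
            {
                cache.put( "key:" + i, "data" + i );
            }
            long putTime = System.currentTimeMillis() - start;
            putTotal += putTime;
            System.out.println( "JCS put time for " + tries + " = " + putTime
                + "; millis per = " + ( (float) putTime / tries ) );

            start = System.currentTimeMillis();
            for ( int i = 0; i < tries; i++ )
            {
                cache.get( "key:" + i );
            }
            long getTime = System.currentTimeMillis() - start;
            getTotal += getTime;
            System.out.println( "JCS get time for " + tries + " = " + getTime
                + "; millis per = " + ( (float) getTime / tries ) );
        }

        System.out.println( "Put average for JCS = " + ( putTotal / loops ) );
        System.out.println( "Get average for JCS = " + ( getTotal / loops ) );
    }
}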


