Posted to jcs-dev@jakarta.apache.org by as...@apache.org on 2004/07/16 03:26:34 UTC

cvs commit: jakarta-turbine-jcs/src/java/org/apache/jcs/auxiliary/disk/indexed IndexedDiskCacheAttributes.java IndexedDiskCache.java IndexedDisk.java

asmuts      2004/07/15 18:26:34

  Modified:    src/java/org/apache/jcs/auxiliary/disk/indexed
                        IndexedDiskCacheAttributes.java
                        IndexedDiskCache.java IndexedDisk.java
  Log:
  recycle bin to reuse empty slots, uses sorted preferential array
  
  almost have real time optimization working nicely
  
  new config attribute for controlling optimization
  
  added a 60 second shutdown window
  
  added stats
  
  no purgatory removal on get
  
  set not-spoolable if purgatory removal
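
  As a quick illustration of the new attribute, here is a minimal sketch of
  how it might be set up programmatically using the classes in
  org.apache.jcs.auxiliary.disk.indexed. setOptimizeAtRemoveCount() and the
  constructor come from the diffs below; the other setter names are assumed
  to mirror the getters used in the IndexedDiskCache constructor, and the
  region name and path are made up:

    IndexedDiskCacheAttributes cattr = new IndexedDiskCacheAttributes();
    cattr.setCacheName( "testRegion" );       // hypothetical region name
    cattr.setDiskPath( "/tmp/jcs-sandbox" );  // hypothetical disk path
    cattr.setMaxKeySize( 10000 );
    // run a real time optimization after every 1000 removes;
    // the default of -1 defers optimization until shutdown
    cattr.setOptimizeAtRemoveCount( 1000 );
    IndexedDiskCache cache = new IndexedDiskCache( cattr );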
  
  Revision  Changes    Path
  1.9       +27 -0     jakarta-turbine-jcs/src/java/org/apache/jcs/auxiliary/disk/indexed/IndexedDiskCacheAttributes.java
  
  Index: IndexedDiskCacheAttributes.java
  ===================================================================
  RCS file: /home/cvs/jakarta-turbine-jcs/src/java/org/apache/jcs/auxiliary/disk/indexed/IndexedDiskCacheAttributes.java,v
  retrieving revision 1.8
  retrieving revision 1.9
  diff -u -r1.8 -r1.9
  --- IndexedDiskCacheAttributes.java	12 Jun 2004 02:34:13 -0000	1.8
  +++ IndexedDiskCacheAttributes.java	16 Jul 2004 01:26:34 -0000	1.9
  @@ -35,6 +35,9 @@
       // default to 500000
       private int maxKeySize = 500000;
   
  +    // default to -1, i.e., don't optimize until shutdown
  +    private int optimizeAtRemoveCount = -1;
  +
       /**
        * Constructor for the DiskCacheAttributes object
        */
  @@ -128,6 +131,30 @@
       {
           this.maxKeySize = maxKeySize;
       }
  +
  +    /**
  +     * Gets the optimizeAtRemoveCount attribute of the DiskCacheAttributes object
  +     *
  +     * @return The optimizeAtRemoveCount value
  +     */
  +    public int getOptimizeAtRemoveCount()
  +    {
  +        return this.optimizeAtRemoveCount;
  +    }
  +
  +
  +    /**
  +     * Sets the optimizeAtRemoveCount attribute of the DiskCacheAttributes object
  +     * This number determines how often the disk cache should run real time
  +     * optimizations.
  +     *
   +     * @param cnt The new optimizeAtRemoveCount value
  +     */
  +    public void setOptimizeAtRemoveCount( int cnt)
  +    {
  +        this.optimizeAtRemoveCount = cnt;
  +    }
  +
   
       /**
        * Description of the Method
  
  
  
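  The IndexedDiskCache diff that follows implements the recycle bin named in
  the log message above. In outline (a sketch using the fields of the class
  below, and assuming SortedPreferentialArray orders descriptors by length):
  a remove adds the freed descriptor to the bin, and a later put claims the
  nearest larger-or-equal free slot instead of appending to the end of the
  data file.

    // on remove: remember the freed spot (position and length) for reuse
    IndexedDiskElementDescriptor freed =
        (IndexedDiskElementDescriptor) keyHash.get( key );
    if ( freed != null )
    {
      recycle.add( freed );
    }

    // on put: try to land the new element in a recycled slot
    IndexedDiskElementDescriptor rep =
        (IndexedDiskElementDescriptor) recycle.takeNearestLargerOrEqual( ded );
    if ( rep != null )
    {
      ded.pos = rep.pos;  // reuse the old position in the data file
    }
    dataFile.write( data, ded.pos );
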
  1.14      +986 -618  jakarta-turbine-jcs/src/java/org/apache/jcs/auxiliary/disk/indexed/IndexedDiskCache.java
  
  Index: IndexedDiskCache.java
  ===================================================================
  RCS file: /home/cvs/jakarta-turbine-jcs/src/java/org/apache/jcs/auxiliary/disk/indexed/IndexedDiskCache.java,v
  retrieving revision 1.13
  retrieving revision 1.14
  diff -u -r1.13 -r1.14
  --- IndexedDiskCache.java	14 Jul 2004 03:35:09 -0000	1.13
  +++ IndexedDiskCache.java	16 Jul 2004 01:26:34 -0000	1.14
  @@ -1,6 +1,5 @@
   package org.apache.jcs.auxiliary.disk.indexed;
   
  -
   /*
    * Copyright 2001-2004 The Apache Software Foundation.
    *
  @@ -17,16 +16,19 @@
    * limitations under the License.
    */
   
  -
  -
   import java.io.File;
   import java.io.Serializable;
  +import java.io.IOException;
  +import java.util.LinkedList;
   import java.util.ConcurrentModificationException;
   import java.util.HashSet;
   import java.util.Iterator;
  +import java.util.HashMap;
   import java.util.Map;
   import java.util.Set;
   
  +import EDU.oswego.cs.dl.util.concurrent.WriterPreferenceReadWriteLock;
  +
   import org.apache.commons.logging.Log;
   import org.apache.commons.logging.LogFactory;
   import org.apache.jcs.auxiliary.disk.AbstractDiskCache;
  @@ -36,7 +38,7 @@
   import org.apache.jcs.engine.behavior.ICacheElement;
   import org.apache.jcs.engine.control.group.GroupAttrName;
   import org.apache.jcs.engine.control.group.GroupId;
  -import org.apache.jcs.utils.locking.ReadWriteLock;
  +//import org.apache.jcs.utils.locking.ReadWriteLock;
   import org.apache.jcs.utils.struct.SortedPreferentialArray;
   
   /**
  @@ -48,800 +50,1166 @@
    *
    * @version $Id$
    */
  -public class IndexedDiskCache extends AbstractDiskCache
  +public class IndexedDiskCache
  +    extends AbstractDiskCache
   {
  -    private static final Log log =
  -        LogFactory.getLog( IndexedDiskCache.class );
  +  private static final Log log =
  +      LogFactory.getLog(IndexedDiskCache.class);
   
  -    private String fileName;
  -    private IndexedDisk dataFile;
  -    private IndexedDisk keyFile;
  -    private LRUMap keyHash;
  -    private int maxKeySize;
  +  private String fileName;
  +  private IndexedDisk dataFile;
  +  private IndexedDisk keyFile;
  +  private LRUMap keyHash;
  +  private int maxKeySize;
   
  -    private File rafDir;
  +  private File rafDir;
   
  -    boolean doRecycle = false; //true;
  -    // array of empty spots
  -    private SortedPreferentialArray recycle;
  +  boolean doRecycle = true;
   
   +  // are we currently optimizing the files
  +  boolean isOptomizing = false;
   
  -    IndexedDiskCacheAttributes cattr;
   +  // list of puts made during optimization; may need a removeList too
  +  private LinkedList optimizingPutList = new LinkedList();
   
  -    /**
  -     * Each instance of a Disk cache should use this lock to synchronize reads
  -     * and writes to the underlying storage mechansism.
  -     */
  -    protected ReadWriteLock storageLock = new ReadWriteLock();
  -
  -    /**
  -     * Constructor for the DiskCache object
  -     *
  -     * @param cattr
  -     */
  -    public IndexedDiskCache( IndexedDiskCacheAttributes cattr )
  -    {
  -        super( cattr.getCacheName() );
   +  // RECYCLE BIN -- array of empty spots
  +  private SortedPreferentialArray recycle;
   
  -        String cacheName = cattr.getCacheName();
  -        String rootDirName = cattr.getDiskPath();
  -        maxKeySize = cattr.getMaxKeySize();
  +  IndexedDiskCacheAttributes cattr;
   
  -        this.cattr = cattr;
   +  // counts removes since the last optimization
  +  private int optCnt = 0;
   
  -        this.fileName = cacheName;
  +  private int recycleCnt = 0;
   
  -        rafDir = new File( rootDirName );
  -        rafDir.mkdirs();
  +  /**
  +   * Each instance of a Disk cache should use this lock to synchronize reads
   +   * and writes to the underlying storage mechanism.
  +   */
  +  //protected ReadWriteLock storageLock = new ReadWriteLock();
   
  -        log.info( "Cache file root directory: " + rootDirName );
  +  protected WriterPreferenceReadWriteLock storageLock = new
  +      WriterPreferenceReadWriteLock();
   
  -        try
  -        {
  -            dataFile = new IndexedDisk(
  -                new File( rafDir, fileName + ".data" ) );
  +  /**
  +   * Constructor for the DiskCache object
  +   *
  +   * @param cattr
  +   */
  +  public IndexedDiskCache(IndexedDiskCacheAttributes cattr)
  +  {
  +    super(cattr.getCacheName());
   
  -            keyFile = new IndexedDisk(
  -                new File( rafDir, fileName + ".key" ) );
  +    String cacheName = cattr.getCacheName();
  +    String rootDirName = cattr.getDiskPath();
  +    maxKeySize = cattr.getMaxKeySize();
   
  -            // If the key file has contents, try to initialize the keys
  -            // from it. In no keys are loaded reset the data file.
  +    this.cattr = cattr;
   
  -            if ( keyFile.length() > 0 )
  -            {
  -                loadKeys();
  +    this.fileName = cacheName;
   
  -                if ( keyHash.size() == 0 )
  -                {
  -                    dataFile.reset();
  -                }
  -            }
  +    rafDir = new File(rootDirName);
  +    rafDir.mkdirs();
   
  -            // Otherwise start with a new empty map for the keys, and reset
  -            // the data file if it has contents.
  +    log.info("Cache file root directory: " + rootDirName);
   
  -            else
  -            {
  -                keyHash = new LRUMap( maxKeySize );
  +    try
  +    {
  +      dataFile = new IndexedDisk(
  +          new File(rafDir, fileName + ".data"));
   
  -                if ( dataFile.length() > 0 )
  -                {
  -                    dataFile.reset();
  -                }
  -            }
  +      keyFile = new IndexedDisk(
  +          new File(rafDir, fileName + ".key"));
   
  -            // TODO, make a new size parameter for this.
  -            recycle = new SortedPreferentialArray( maxKeySize );
  +      // If the key file has contents, try to initialize the keys
   +      // from it. If no keys are loaded, reset the data file.
   
  -            // Initialization finished successfully, so set alive to true.
  +      if (keyFile.length() > 0)
  +      {
  +        loadKeys();
   
  -            alive = true;
  -        }
  -        catch ( Exception e )
  +        if (keyHash.size() == 0)
           {
  -            log.error( "Failure initializing for fileName: " + fileName
  -                + " and root directory: " + rootDirName, e );
  +          dataFile.reset();
           }
  -    }
  +      }
   
  -    /**
  -     * Description of the Method
  -     */
  -    private void loadKeys()
  -        throws InterruptedException
  -    {
  -        storageLock.writeLock();
  +      // Otherwise start with a new empty map for the keys, and reset
  +      // the data file if it has contents.
   
  -        if ( log.isInfoEnabled() )
  +      else
  +      {
  +        keyHash = new LRUMap(maxKeySize);
  +
  +        if (dataFile.length() > 0)
           {
  -          log.info( "loading keys for " + keyFile.toString() );
  +          dataFile.reset();
           }
  +      }
   
  -        try
  -        {
  -            keyHash = ( LRUMap ) keyFile.readObject( 0 );
  +      // TODO, make a new size parameter for this.
  +      recycle = new SortedPreferentialArray(maxKeySize);
   
  -            if ( keyHash == null )
  -            {
  -                keyHash = new LRUMap( maxKeySize );
  -            }
  -            else
  -            {
  -                if ( log.isInfoEnabled() )
  -                {
  -                  log.info( "Loaded keys from: " + fileName +
  -                          ", key count: " + keyHash.size() );
  -                }
  +      // Initialization finished successfully, so set alive to true.
   
  -                keyHash.setMaximumSize( maxKeySize );
  -                if ( log.isInfoEnabled() )
  -                {
  -                  log.info( "Reset maxKeySize to: '" + maxKeySize + "'" );
  -                }
  -            }
  +      alive = true;
  +    }
  +    catch (Exception e)
  +    {
  +      log.error("Failure initializing for fileName: " + fileName
  +                + " and root directory: " + rootDirName, e);
  +    }
  +  }
   
  -            if ( log.isDebugEnabled() )
  -            {
  -                Iterator itr = keyHash.entrySet().iterator();
  -                while ( itr.hasNext() )
  -                {
  -                   Map.Entry e = (Map.Entry)itr.next();
  -                   String key = (String)e.getKey();
  -                   IndexedDiskElementDescriptor de = (IndexedDiskElementDescriptor)e.getValue();
  -                   log.debug( "key entry: " + key + ", ded.pos" + de.pos + ", ded.len" + de.len );
  -                }
  -            }
  +  /**
  +   * Loads the keys from the .key file.  The keys are stored in a HashMap on
   +   * disk.  This is converted into an LRUMap.
  +   */
  +  private void loadKeys() throws InterruptedException
  +  {
  +    //storageLock.writeLock();
  +    storageLock.writeLock().acquire();
  +
  +    if (log.isInfoEnabled())
  +    {
  +      log.info("loading keys for " + keyFile.toString());
  +    }
  +
  +    try
  +    {
  +
  +      keyHash = new LRUMap(maxKeySize);
  +      HashMap keys = (HashMap) keyFile.readObject(0);
   
  +      if (keys != null)
  +      {
  +        keyHash.putAll(keys);
  +
  +        if (log.isInfoEnabled())
  +        {
  +          log.info("Loaded keys from: " + fileName +
  +                   ", key count: " + keyHash.size());
           }
  -        catch ( Exception e )
  +
  +        keyHash.setMaximumSize(maxKeySize);
  +        if (log.isInfoEnabled())
           {
  -            log.error( fileName, e );
  +          log.info("Reset maxKeySize to: '" + maxKeySize + "'");
           }
  -        finally
  +      }
  +
  +      if (log.isDebugEnabled())
  +      {
  +        Iterator itr = keyHash.entrySet().iterator();
  +        while (itr.hasNext())
           {
  -            storageLock.done();
  +          Map.Entry e = (Map.Entry) itr.next();
  +          String key = (String) e.getKey();
  +          IndexedDiskElementDescriptor de = (IndexedDiskElementDescriptor) e.
  +              getValue();
  +          log.debug("key entry: " + key + ", ded.pos" + de.pos + ", ded.len" +
  +                    de.len);
           }
  +      }
  +
  +    }
  +    catch (Exception e)
  +    {
  +      log.error(fileName, e);
       }
  +    finally
  +    {
  +      //storageLock.done();
  +      storageLock.writeLock().release();
  +    }
  +  }
   
  -    /**
  -     * Saves key file to disk
  -     */
  -    private void saveKeys()
  +  /**
   +   * Saves key file to disk.  This converts the LRUMap to a HashMap for serialization.
  +   */
  +  private void saveKeys()
  +  {
  +    try
       {
  -        try
  -        {
  -            if ( log.isDebugEnabled() )
  -            {
  -                log.debug( "Saving keys to: " + fileName +
  -                    ", key count: " + keyHash.size() );
  -            }
  +      if (log.isDebugEnabled())
  +      {
  +        log.debug("Saving keys to: " + fileName +
  +                  ", key count: " + keyHash.size());
  +      }
   
  -            storageLock.writeLock();
  +      //storageLock.writeLock();
  +      //storageLock.writeLock().acquire();
   
  -            try
  -            {
  -                keyFile.reset();
  +      try
  +      {
  +        keyFile.reset();
   
  -                if ( keyHash.size() > 0 )
  -                {
  -                    keyFile.writeObject( keyHash, 0 );
  -                }
  -            }
  -            finally
  -            {
  -                storageLock.done();
  -            }
  -        }
  -        catch ( Exception e )
  +        HashMap keys = new HashMap();
  +        keys.putAll(keyHash);
  +
  +        if (keys.size() > 0)
           {
  -            log.error( e );
  +          keyFile.writeObject(keys, 0);
           }
  +      }
  +      finally
  +      {
  +        //storageLock.done();
  +        //storageLock.writeLock().release();
  +
  +      }
  +    }
  +    catch (Exception e)
  +    {
  +      log.error(e);
       }
  +  }
   
  -    /**
  -     * Update the disk cache. Called from the Queue. Makes sure the Item has not
  -     * been retireved from purgatory while in queue for disk. Remove items from
  -     * purgatory when they go to disk.
  -     */
  -    public void doUpdate( ICacheElement ce )
  +  /**
  +   * Update the disk cache. Called from the Queue. Makes sure the Item has not
   +   * been retrieved from purgatory while in queue for disk. Remove items from
  +   * purgatory when they go to disk.
  +   */
  +  public void doUpdate(ICacheElement ce)
  +  {
  +
  +    if (log.isDebugEnabled())
       {
  -        if ( log.isDebugEnabled() )
  -        {
  -            log.debug( "Storing element on disk, key: " + ce.getKey() );
  -        }
  +      log.debug("Storing element on disk, key: " + ce.getKey());
  +    }
   
  -        IndexedDiskElementDescriptor ded = null;
  +    IndexedDiskElementDescriptor ded = null;
   
  -        try
  +    try
  +    {
  +      ded = new IndexedDiskElementDescriptor();
  +      byte[] data = IndexedDisk.serialize(ce);
  +
  +      // make sure this only locks for one particular cache region
  +      //storageLock.writeLock();
  +      storageLock.writeLock().acquire();
  +
  +      ded.init(dataFile.length(), data);
  +
  +      try
  +      {
  +        if (!alive)
           {
  -            ded = new IndexedDiskElementDescriptor();
  -            byte[] data = IndexedDisk.serialize( ce );
  -            ded.init( dataFile.length(), data );
  +          return;
  +        }
   
  -            // make sure this only locks for one particular cache region
  -            storageLock.writeLock();
  +        IndexedDiskElementDescriptor old =
  +            (IndexedDiskElementDescriptor)
  +            keyHash.put(ce.getKey(), ded);
   
  -            try
  +        // Item with the same key already exists in file.
  +        // Try to reuse the location if possible.
  +        if (old != null && ded.len <= old.len)
  +        {
  +          ded.pos = old.pos;
  +        }
  +        else
  +        {
  +          if (doRecycle)
  +          {
  +            IndexedDiskElementDescriptor rep =
  +                (IndexedDiskElementDescriptor) recycle.takeNearestLargerOrEqual(
  +                ded);
  +            if (rep != null)
               {
  -                if ( !alive )
  -                {
  -                    return;
  -                }
  -
  -                IndexedDiskElementDescriptor old =
  -                    ( IndexedDiskElementDescriptor )
  -                        keyHash.put( ce.getKey(), ded );
  -
  -                // Item with the same key already exists in file.
  -                // Try to reuse the location if possible.
  -                if ( old != null && ded.len <= old.len )
  -                {
  -                  ded.pos = old.pos;
  -                } else {
  -                  if ( doRecycle )
  -                  {
  -                    IndexedDiskElementDescriptor rep =
  -                        (IndexedDiskElementDescriptor)recycle.takeNearestLargerOrEqual(ded);
  -                    if ( rep != null )
  -                    {
  -                      ded.pos = rep.pos;
  -                      if (log.isInfoEnabled())
  -                      {
  -                        log.info("using recycled ded " + ded.pos +
  -                                 " rep.len = " + rep.len + " ded.len = " +
  -                                 ded.len);
  -                      }
  -                    } else {
  -                      if (log.isInfoEnabled())
  -                      {
  -                        log.info("no ded to recycle" );
  -                      }
  +              ded.pos = rep.pos;
  +              recycleCnt++;
  +              if (log.isDebugEnabled())
  +              {
   
  -                    }
  -                  }
  -                }
  -                dataFile.write( data, ded.pos );
  +                log.debug("using recycled ded " + ded.pos +
  +                          " rep.len = " + rep.len + " ded.len = " +
  +                          ded.len);
  +              }
               }
  -            finally
  +            else
               {
  -                storageLock.done();
  -            }
  +              if (log.isDebugEnabled())
  +              {
  +                log.debug("no ded to recycle");
  +              }
   
  -            if ( log.isDebugEnabled() )
  -            {
  -                log.debug( "Put to file: " + fileName +
  -                           ", key: " + ce.getKey() +
  -                           ", position: " + ded.pos +
  -                           ", size: " + ded.len );
               }
  +          }
           }
  -        catch ( ConcurrentModificationException cme )
  -        {
  -            // do nothing, this means it has gone back to memory mid serialization
  -        }
  -        catch ( Exception e )
  +        dataFile.write(data, ded.pos);
  +
  +        if (this.isOptomizing)
           {
  -            log.error( "Failure updating element, cacheName: " + cacheName +
  -                       ", key: " + ce.getKey(), e );
  +          optimizingPutList.addLast(ce.getKey());
  +          if (log.isDebugEnabled())
  +          {
   +            log.debug("added to optimizing put list. " + optimizingPutList.size());
  +          }
           }
  -        return;
  -    }
   
  -    /**
  -     * @see AbstractDiskCache#doGet
  -     */
  -    protected ICacheElement doGet( Serializable key )
  +      }
  +      finally
  +      {
  +        //storageLock.done();
  +        storageLock.writeLock().release();
  +      }
  +
  +      if (log.isDebugEnabled())
  +      {
  +        log.debug("Put to file: " + fileName +
  +                  ", key: " + ce.getKey() +
  +                  ", position: " + ded.pos +
  +                  ", size: " + ded.len);
  +      }
  +    }
  +    catch (ConcurrentModificationException cme)
       {
  -        if ( log.isDebugEnabled() )
  -        {
  -            log.debug( "Trying to get from disk: " + key );
  -        }
  +      // do nothing, this means it has gone back to memory mid serialization
  +    }
  +    catch (Exception e)
  +    {
  +      log.error("Failure updating element, cacheName: " + cacheName +
  +                ", key: " + ce.getKey(), e);
  +    }
  +    return;
  +  }
   
  -        ICacheElement object = null;
  +  /**
  +   * @see AbstractDiskCache#doGet
  +   */
  +  protected ICacheElement doGet(Serializable key)
  +  {
   
  -        try
  -        {
  -            storageLock.readLock();
  +    if (log.isDebugEnabled())
  +    {
  +      log.debug("Trying to get from disk: " + key);
  +    }
   
  -            if ( !alive )
  -            {
  -                log.debug( "No longer alive so returning null, cacheName: " +
  -                           cacheName + ", key = " + key );
  +    ICacheElement object = null;
   
  -                return null;
  -            }
  +    try
  +    {
  +      //storageLock.readLock();
  +      storageLock.readLock().acquire();
   
  -            object = readElement( key );
  +      if (!alive)
  +      {
  +        log.debug("No longer alive so returning null, cacheName: " +
  +                  cacheName + ", key = " + key);
   
  -        }
  -        catch ( Exception e )
  -        {
  -            log.error( "Failure getting from disk, cacheName: " + cacheName +
  -                       ", key = " + key, e );
  -        }
  -        finally
  -        {
  -            storageLock.done();
  -        }
  +        return null;
  +      }
  +
  +      object = readElement(key);
   
  -        return object;
       }
  +    catch (Exception e)
  +    {
  +      log.error("Failure getting from disk, cacheName: " + cacheName +
  +                ", key = " + key, e);
  +    }
  +    finally
  +    {
  +      //storageLock.done();
  +      storageLock.readLock().release();
  +    }
  +
  +    return object;
  +  }
   
  -    private CacheElement readElement( Serializable key )
  -        throws Exception
  +  private CacheElement readElement(Serializable key) throws IOException
  +  {
  +    CacheElement object = null;
  +
  +    IndexedDiskElementDescriptor ded =
  +        (IndexedDiskElementDescriptor) keyHash.get(key);
  +
  +    if (ded != null)
       {
  -        CacheElement object = null;
  +      if (log.isDebugEnabled())
  +      {
  +        log.debug("Found on disk, key: " + key);
  +      }
  +      try
  +      {
  +        object = (CacheElement) dataFile.readObject(ded.pos);
  +      }
  +      catch (IOException e)
  +      {
  +        log.error("Problem reading object from file");
  +        throw e;
  +      }
   
  -        IndexedDiskElementDescriptor ded =
  -            ( IndexedDiskElementDescriptor ) keyHash.get( key );
  +    }
   
  -        if ( ded != null )
  -        {
  -            if ( log.isDebugEnabled() )
  -            {
  -                log.debug( "Found on disk, key: " + key );
  -            }
  +    return object;
  +  }
   
  -            object = ( CacheElement ) dataFile.readObject( ded.pos );
  +  public Set getGroupKeys(String groupName)
  +  {
  +    GroupId groupId = new GroupId(cacheName, groupName);
  +    HashSet keys = new HashSet();
  +    try
  +    {
  +      //storageLock.readLock();
  +      storageLock.readLock().acquire();
  +
  +      for (Iterator itr = keyHash.keySet().iterator(); itr.hasNext(); )
  +      {
  +        //Map.Entry entry = (Map.Entry) itr.next();
  +        //Object k = entry.getKey();
  +        Object k = itr.next();
  +        if (k instanceof GroupAttrName
  +            && ( (GroupAttrName) k).groupId.equals(groupId))
  +        {
  +          keys.add( ( (GroupAttrName) k).attrName);
           }
  +      }
  +    }
  +    catch (Exception e)
  +    {
  +      log.error("Failure getting from disk, cacheName: " + cacheName +
  +                ", group = " + groupName, e);
  +    }
  +    finally
  +    {
  +      //storageLock.done();
  +      storageLock.readLock().release();
  +    }
   
  -        return object;
  +    return keys;
  +  }
  +
  +  /**
   +   * Returns true if the removal was successful; or false if there is nothing
   +   * to remove. The current implementation always results in a disk orphan.
  +   *
  +   * @return
  +   * @param key
  +   */
  +  public boolean doRemove(Serializable key)
  +  {
  +
  +    optCnt++;
  +    if (!this.isOptomizing && optCnt == this.cattr.getOptimizeAtRemoveCount())
  +    {
  +      doOptimizeRealTime();
  +      if (log.isInfoEnabled())
  +      {
  +        log.info("optCnt = " + optCnt);
  +      }
       }
   
  -    public Set getGroupKeys(String groupName)
  +    boolean removed = false;
  +    try
       {
  -        GroupId groupId = new GroupId(cacheName, groupName);
  -        HashSet keys = new HashSet();
  -        try
  +      //storageLock.writeLock();
   +      storageLock.writeLock().acquire();
  +
  +      if (key instanceof String
  +          && key.toString().endsWith(CacheConstants.NAME_COMPONENT_DELIMITER))
  +      {
  +        // remove all keys of the same name group.
  +
  +        Iterator iter = keyHash.entrySet().iterator();
  +
  +        while (iter.hasNext())
           {
  -            storageLock.readLock();
  +          Map.Entry entry = (Map.Entry) iter.next();
  +
  +          Object k = entry.getKey();
   
  -            for (Iterator itr = keyHash.keySet().iterator(); itr.hasNext();)
  +          if (k instanceof String
  +              && k.toString().startsWith(key.toString()))
  +          {
  +
  +            if (doRecycle)
               {
  -                //Map.Entry entry = (Map.Entry) itr.next();
  -                //Object k = entry.getKey();
  -                Object k = itr.next();
  -                if ( k instanceof GroupAttrName
  -                     && ((GroupAttrName)k).groupId.equals(groupId) )
  +              // reuse the spot
  +              IndexedDiskElementDescriptor ded =
  +                  (IndexedDiskElementDescriptor) keyHash.get(key);
  +              if (ded != null)
  +              {
  +                recycle.add(ded);
  +                if (log.isDebugEnabled())
                   {
  -                    keys.add(((GroupAttrName)k).attrName);
  +                  log.debug("recycling ded " + ded);
                   }
  +              }
               }
  +
  +            iter.remove();
  +            removed = true;
  +          }
           }
  -        catch ( Exception e )
  +        return removed;
  +      }
  +      else if (key instanceof GroupId)
  +      {
  +        // remove all keys of the same name hierarchy.
  +        Iterator iter = keyHash.entrySet().iterator();
  +        while (iter.hasNext())
  +        {
  +          Map.Entry entry = (Map.Entry) iter.next();
  +          Object k = entry.getKey();
  +
  +          if (k instanceof GroupAttrName
  +              && ( (GroupAttrName) k).groupId.equals(key))
  +          {
  +            if (doRecycle)
  +            {
  +              // reuse the spot
  +              IndexedDiskElementDescriptor ded =
  +                  (IndexedDiskElementDescriptor) keyHash.get(key);
  +              if (ded != null)
  +              {
  +                recycle.add(ded);
  +                if (log.isDebugEnabled())
  +                {
  +                  log.debug("recycling ded " + ded);
  +                }
  +              }
  +            }
  +
  +            iter.remove();
  +            removed = true;
  +          }
  +        }
  +      }
  +      else
  +      {
  +
  +        if (log.isDebugEnabled())
           {
  -            log.error( "Failure getting from disk, cacheName: " + cacheName +
  -                       ", group = " + groupName, e );
  +          log.debug("Disk removal: Removed from key hash, key " + key);
           }
  -        finally
  +
  +        if (doRecycle)
           {
  -            storageLock.done();
  +          // reuse the spot
  +          IndexedDiskElementDescriptor ded =
  +              (IndexedDiskElementDescriptor) keyHash.get(key);
  +          if (ded != null)
  +          {
  +            recycle.add(ded);
  +            if (log.isDebugEnabled())
  +            {
  +              log.debug("recycling ded " + ded);
  +            }
  +          }
           }
   
  -        return keys;
  +        // remove single item.
  +        return keyHash.remove(key) != null;
  +
  +      }
  +    }
  +    catch (Exception e)
  +    {
  +      log.error(e);
  +      reset();
  +    }
  +    finally
  +    {
  +      //storageLock.done();
  +      storageLock.writeLock().release();
       }
   
  -    /**
  -     * Returns true if the removal was succesful; or false if there is nothing
  -     * to remove. Current implementation always result in a disk orphan.
  -     *
  -     * @return
  -     * @param key
  -     */
  -    public boolean doRemove( Serializable key )
  +    return false;
  +  }
  +
  +  /**
   +   * Removes all cached items by resetting the cache.
  +   */
  +  public void doRemoveAll()
  +  {
  +    try
       {
  -        boolean removed = false;
  -        try
  -        {
  -            storageLock.writeLock();
  +      reset();
  +    }
  +    catch (Exception e)
  +    {
  +      log.error(e);
  +      reset();
  +    }
  +    finally
  +    {
  +    }
  +  }
   
  -            if ( key instanceof String
  -                 && key.toString().endsWith( CacheConstants.NAME_COMPONENT_DELIMITER ) )
  -            {
  -                // remove all keys of the same name group.
  +  // end removeAll
   
  -                Iterator iter = keyHash.entrySet().iterator();
  +  /**
   +   * Last resort error handling: force a content reset (remove all).
  +   */
  +  private void reset()
  +  {
   +    log.debug("Resetting cache");
   
  -                while ( iter.hasNext() )
  -                {
  -                    Map.Entry entry = ( Map.Entry ) iter.next();
  +    try
  +    {
  +      //storageLock.writeLock();
  +      storageLock.writeLock().acquire();
   
  -                    Object k = entry.getKey();
  +      dataFile.close();
  +      File file = new File(rafDir, fileName + ".data");
  +      file.delete();
   
  -                    if ( k instanceof String
  -                         && k.toString().startsWith( key.toString() ) )
  -                    {
  -
  -                      if ( doRecycle ) {
  -                        // reuse the spot
  -                        IndexedDiskElementDescriptor ded =
  -                            (IndexedDiskElementDescriptor) keyHash.get(key);
  -                        if (ded != null)
  -                        {
  -                          recycle.add(ded);
  -                          if (log.isDebugEnabled())
  -                          {
  -                            log.debug("recycling ded " + ded);
  -                          }
  -                        }
  -                      }
  -
  -                        iter.remove();
  -                        removed = true;
  -                    }
  -                }
  -                return removed;
  -            }
  -            else if ( key instanceof GroupId )
  -            {
  -                // remove all keys of the same name hierarchy.
  -                Iterator iter = keyHash.entrySet().iterator();
  -                while ( iter.hasNext() )
  -                {
  -                    Map.Entry entry = (Map.Entry) iter.next();
  -                    Object k = entry.getKey();
  +      keyFile.close();
  +      File file2 = new File(rafDir, fileName + ".key");
  +      file2.delete();
   
  -                    if ( k instanceof GroupAttrName
  -                         && ((GroupAttrName)k).groupId.equals(key) )
  -                    {
  -                      if ( doRecycle ) {
  -                        // reuse the spot
  -                        IndexedDiskElementDescriptor ded =
  -                            (IndexedDiskElementDescriptor) keyHash.get(key);
  -                        if (ded != null)
  -                        {
  -                          recycle.add(ded);
  -                          if (log.isDebugEnabled())
  -                          {
  -                            log.debug("recycling ded " + ded);
  -                          }
  -                        }
  -                      }
  -
  -                        iter.remove();
  -                        removed = true;
  -                    }
  -                }
  -            }
  -            else
  -            {
  +      dataFile =
  +          new IndexedDisk(new File(rafDir, fileName + ".data"));
   
  -                if ( log.isDebugEnabled() )
  -                {
  -                    log.debug( "Disk removal: Removed from key hash, key " + key );
  -                }
  +      keyFile =
  +          new IndexedDisk(new File(rafDir, fileName + ".key"));
   
  -                if ( doRecycle ) {
  -                  // reuse the spot
  -                  IndexedDiskElementDescriptor ded =
  -                      (IndexedDiskElementDescriptor) keyHash.get(key);
  -                  if (ded != null)
  -                  {
  -                    recycle.add(ded);
  -                    if (log.isDebugEnabled())
  -                    {
  -                      log.debug("recycling ded " + ded);
  -                    }
  -                  }
  -                }
  +      recycle = null;
  +      recycle = new SortedPreferentialArray(maxKeySize);
   
  -                // remove single item.
  -                return keyHash.remove( key ) != null;
  +      keyHash = null;
  +      keyHash = new LRUMap(this.maxKeySize);
  +    }
  +    catch (Exception e)
  +    {
   +      log.error("Failure resetting state", e);
  +    }
  +    finally
  +    {
  +      //storageLock.done();
  +      storageLock.writeLock().release();
  +    }
  +  }
   
  -            }
  -        }
  -        catch ( Exception e )
  -        {
  -            log.error( e );
  -            reset();
  -        }
  -        finally
  -        {
  -            storageLock.done();
  -        }
  +  /**
  +   * Dispose of the disk cache in a background thread.  Joins against this
  +   * thread to put a cap on the disposal time.
  +   */
  +  public void doDispose()
  +  {
  +    Runnable disR = new Runnable()
  +    {
  +      public void run()
  +      {
  +        disposeInternal();
  +      }
  +    };
  +    Thread t = new Thread(disR);
  +    t.start();
  +    // wait up to 60 seconds for dispose and then quit if not done.
  +    try
  +    {
  +      t.join(60 * 1000);
  +    }
  +    catch (InterruptedException ex)
  +    {
  +      log.error(ex);
  +    }
  +  }
   
  -        return false;
  +  /**
  +   * Internal method that handles the disposal.
  +   */
  +  private void disposeInternal()
  +  {
  +    try
  +    {
  +      //storageLock.writeLock();
  +      storageLock.writeLock().acquire();
  +
  +      if (!alive)
  +      {
  +        log.debug("Not alive and dispose was called, filename: " +
  +                  fileName);
  +        return;
  +      }
  +
  +      try
  +      {
  +        optimizeFile();
  +      }
  +      catch (Exception e)
  +      {
  +        log.error(fileName, e);
  +      }
  +      try
  +      {
  +        log.warn("Closing files, base filename: " + fileName);
  +        dataFile.close();
  +        dataFile = null;
  +        keyFile.close();
  +        keyFile = null;
  +      }
  +      catch (Exception e)
  +      {
  +        log.error("Failure closing files in dispose, filename: " +
  +                  fileName, e);
  +      }
  +    }
  +    catch (Exception e)
  +    {
  +      log.error("Failure in dispose", e);
       }
  +    finally
  +    {
  +      alive = false;
   
  -    /**
  -     * Description of the Method
  -     */
  -    public void doRemoveAll()
  +      try
  +      {
  +        //storageLock.done();
  +        storageLock.writeLock().release();
  +      }
  +      catch (Exception e)
  +      {
  +        log.error("Failure releasing lock on shutdown " + e);
  +      }
  +    }
  +
  +  }
  +
  +/////////////////////////////////////////////////////////////////////////////////
  +// OPTIMIZATION METHODS
  +
  +  /**
   +   * Kicks off a real time optimization in a background thread, if one is
   +   * not already in progress.
  +   */
  +  public synchronized void doOptimizeRealTime()
  +  {
  +    if (!this.isOptomizing)
       {
  -        try
  +      this.isOptomizing = true;
  +      Runnable optR = new Runnable()
  +      {
  +        public void run()
           {
  -            reset();
  +          optimizeRealTime();
           }
  -        catch ( Exception e )
  +      };
  +      Thread t = new Thread(optR);
  +      t.start();
  +    }
  +    /*
  +        // wait up to 60 seconds for dispose and then quit if not done.
  +        try
           {
  -            log.error( e );
  -            reset();
  +          t.join(60 * 1000);
           }
  -        finally
  +        catch (InterruptedException ex)
           {
  +          log.error(ex);
           }
  -    }
  -    // end removeAll
  -
  -    /**
  -     * handle error by last resort, force content update, or removeall
        */
  -    private void reset()
  -    {
  -        log.debug( "Reseting cache" );
  -
  -        try
  -        {
  -            storageLock.writeLock();
  +  }
   
  -            dataFile.close();
  -            File file = new File( rafDir, fileName + ".data" );
  -            file.delete();
  +  private int timesOptimized = 0;
   
  -            keyFile.close();
  -            File file2 = new File( rafDir, fileName + ".key" );
  -            file2.delete();
  +  /**
  +   * Realtime optimization is handled by this method.  It works in this way:
  +   *
  +   * 1.  lock the active file, create a new file
  +   * 2.  copy the keys for iteration
   +   * 3.  for each key in the copy, make sure it is still in the active keyHash
  +   *     to prevent putting items on disk that have been removed.  It also checks the
  +   *     new keyHash to make sure that a newer version hasn't already been put.
   +   * 4.  Write the element for the key copy to disk in the normal procedure.
  +   * 5.  All gets will be serviced by the new file.
  +   * 6.  All puts are made on the new file.
  +   *
  +   */
  +  private void optimizeRealTime()
  +  {
   
  -            dataFile =
  -                new IndexedDisk( new File( rafDir, fileName + ".data" ) );
  +    long start = System.currentTimeMillis();
  +    if (log.isInfoEnabled())
  +    {
  +      log.info("Beginning Real Time Optimization #" + ++timesOptimized);
  +    }
   
  -            keyFile =
  -                new IndexedDisk( new File( rafDir, fileName + ".key" ) );
  +    Object[] keys = null;
   
  -            recycle = null;
  -            recycle = new SortedPreferentialArray( maxKeySize );
  +    try
  +    {
  +      //storageLock.readLock();
  +      storageLock.readLock().acquire();
  +      try
  +      {
  +        keys = keyHash.keySet().toArray();
  +      }
  +      finally
  +      {
  +        //storageLock.done();
  +        storageLock.readLock().release();
  +      }
   
  -            keyHash = null;
  -            keyHash = new LRUMap( this.maxKeySize );
  -        }
  -        catch ( Exception e )
  +      LRUMap keyHashTemp = new LRUMap(this.maxKeySize);
  +      keyHashTemp.tag = "Round=" + timesOptimized;
  +      IndexedDisk dataFileTemp =
  +          new IndexedDisk(new File(rafDir, fileName + "Temp.data"));
  +      //dataFileTemp.reset();
  +
  +      // set flag to true
  +      isOptomizing = true;
  +
  +      int len = keys.length;
  +      //while ( itr.hasNext() )
  +      if (log.isInfoEnabled())
  +      {
  +        log.info("Optimizing RT -- TempKeys, length = " + len);
  +      }
  +      for (int i = 0; i < len; i++)
  +      {
  +        // lock so no more gets to the queue -- optimizingPutList
  +        //storageLock.writeLock();
  +        storageLock.writeLock().acquire();
  +        try
           {
  -            log.error( "Failure reseting state", e );
  +          //Serializable key = ( Serializable ) itr.next();
  +          Serializable key = (Serializable) keys[i];
  +          this.moveKeyDataToTemp(key, keyHashTemp, dataFileTemp);
           }
           finally
           {
  -            storageLock.done();
  +          //storageLock.done();
  +          storageLock.writeLock().release();
           }
  -    }
  +      }
   
  -    /**
  -     * Description of the Method
  -     */
  -    public void doDispose()
  -    {
  -        try
  +      // potentially, this will cause the longest delay
  +      // lock so no more gets to the queue -- optimizingPutList
  +      //storageLock.writeLock();
  +      storageLock.writeLock().acquire();
  +      try
  +      {
  +        // switch primary and do the same for those on the list
  +        if (log.isInfoEnabled())
           {
  -            storageLock.writeLock();
  -
  -            if ( !alive )
  -            {
  -                log.debug( "Not alive and dispose was called, filename: " +
  -                    fileName );
  -                return;
  -            }
  -
  -            try
  -            {
  -                optimizeFile();
  -            }
  -            catch ( Exception e )
  -            {
  -                log.error( fileName, e );
  -            }
  -            try
  -            {
  -                log.warn( "Closing files, base filename: " + fileName );
  -                dataFile.close();
  -                dataFile = null;
  -                keyFile.close();
  -                keyFile = null;
  -            }
  -            catch ( Exception e )
  -            {
  -                log.error( "Failure closing files in dispose, filename: " +
  -                    fileName, e );
  -            }
  +          log.info("Optimizing RT -- PutList, size = " + optimizingPutList.size());
           }
  -        catch ( Exception e )
  +
  +        while (optimizingPutList.size() > 0)
           {
  -            log.error( "Failure in dispose", e );
  +          Serializable key = (Serializable) optimizingPutList.removeFirst();
  +          this.moveKeyDataToTemp(key, keyHashTemp, dataFileTemp);
           }
  -        finally
  +        if (log.isInfoEnabled())
           {
  -            alive = false;
  +          log.info("keyHashTemp, size = " + keyHashTemp.size());
  +        }
   
  -            try
  -            {
  -              storageLock.done();
  -            } catch ( Exception e )
  -            {
  -                log.error( "Failure releasing lock on shutdown " + e );
  -            }
  +        // switch files.
  +        // main
  +        if (log.isInfoEnabled())
  +        {
  +          log.info("Optimizing RT -- Replacing Files");
           }
  -    }
  +        tempToPrimary(keyHashTemp, dataFileTemp);
  +      }
  +      finally
  +      {
  +        //storageLock.done();
  +        storageLock.writeLock().release();
  +      }
   
  -    /**
  -     * Note: synchronization currently managed by the only caller method -
  -     * dispose.
  -     */
  -    private void optimizeFile()
  +    }
  +    catch (Exception e)
       {
  -        try
  -        {
  -            // Migrate from keyHash to keyHshTemp in memory,
  -            // and from dataFile to dataFileTemp on disk.
  -            LRUMap keyHashTemp = new LRUMap( this.maxKeySize );
  +      log.error("Failure Optimizing RealTime, cacheName: " + cacheName, e);
  +    }
  +    optCnt = 0;
  +    isOptomizing = false;
   
  -            IndexedDisk dataFileTemp =
  -                new IndexedDisk( new File( rafDir, fileName + "Temp.data" ) );
  +    long end = System.currentTimeMillis();
  +    long time = end - start;
  +    if (log.isInfoEnabled())
  +    {
  +      log.info("Finished #" + timesOptimized + " Real Time Optimization in " +
  +               time + " millis.");
  +    }
   
  -            if ( log.isInfoEnabled() )
  -            {
  -                log.info( "Optomizing file keyHash.size()=" + keyHash.size() );
  -            }
  +  }
   
  -            //Iterator itr = keyHash.keySet().iterator();
  +  /**
  +   * Note: synchronization currently managed by the only caller method -
  +   * dispose.
  +   */
  +  private void optimizeFile()
  +  {
  +    try
  +    {
   +      // Migrate from keyHash to keyHashTemp in memory,
  +      // and from dataFile to dataFileTemp on disk.
  +      LRUMap keyHashTemp = new LRUMap(this.maxKeySize);
  +
  +      IndexedDisk dataFileTemp =
  +          new IndexedDisk(new File(rafDir, fileName + "Temp.data"));
  +      //dataFileTemp.reset();
   
  -            Object[] keys = keyHash.keySet().toArray();
  -            int len = keys.length;
  +      if (log.isInfoEnabled())
  +      {
   +        log.info("Optimizing file keyHash.size()=" + keyHash.size());
  +      }
   
  -            //while ( itr.hasNext() )
  -            for ( int i = 0; i < len; i++ )
  -            {
  -                //Serializable key = ( Serializable ) itr.next();
  -                Serializable key = ( Serializable ) keys[i];
  +      //Iterator itr = keyHash.keySet().iterator();
   
  -                CacheElement tempDe = readElement( key );
  -                try
  -                {
  -                    //IndexedDiskElementDescriptor de =
  -                    //    dataFileTemp.appendObject( tempDe );
  +      Object[] keys = keyHash.keySet().toArray();
  +      int len = keys.length;
   
  -                    IndexedDiskElementDescriptor ded = new IndexedDiskElementDescriptor();
  -                    byte[] data = IndexedDisk.serialize( tempDe );
  -                    ded.init( dataFileTemp.length(), data );
  -                    dataFileTemp.write( data, ded.pos );
  -
  -                    if ( log.isDebugEnabled() )
  -                    {
  -                        log.debug( "Optomize: Put to temp disk cache: " + fileName +
  -                                   ", key: " + key + ", ded.pos:" + ded.pos + ", ded.len:" + ded.len);
  -                    }
  +      try
  +      {
   
  -                    keyHashTemp.put( key, ded );
  -                }
  -                catch ( Exception e )
  -                {
  -                    log.error( "Failed to put to temp disk cache: " + fileName
  -                               + ", key: " + key, e );
  -                }
  -            }
  +        //while ( itr.hasNext() )
  +        for (int i = 0; i < len; i++)
  +        {
  +          //Serializable key = ( Serializable ) itr.next();
  +          Serializable key = (Serializable) keys[i];
  +          this.moveKeyDataToTemp(key, keyHashTemp, dataFileTemp);
  +        }
   
  -            if ( log.isDebugEnabled() )
  -            {
  -                log.debug( fileName
  -                    + " -- keyHashTemp.size(): " + keyHashTemp.size()
  -                    + ", keyHash.size(): " + keyHash.size() );
  -            }
  +        // main
  +        tempToPrimary(keyHashTemp, dataFileTemp);
   
  -            // Make dataFileTemp to become dataFile on disk.
  -            dataFileTemp.close();
  -            dataFile.close();
  -            File oldData = new File( rafDir, fileName + ".data" );
  -            if ( oldData.exists() )
  -            {
  -                if ( log.isDebugEnabled() )
  -                {
  -                    log.debug( fileName + " -- oldData.length() = " +
  -                        oldData.length() );
  -                }
  -                oldData.delete();
  -            }
  -            File newData = new File( rafDir, fileName + "Temp.data" );
  -            File newFileName = new File( rafDir, fileName + ".data" );
  -            if ( newData.exists() )
  -            {
  -                if ( log.isDebugEnabled() )
  -                {
  -                    log.debug( fileName + " -- newData.length() = " +
  -                        newData.length() );
  -                }
  +      }
  +      catch (IOException e)
  +      {
  +        log.error("Problem in optimization, abandoning attempt");
  +      }
   
  -                newData.renameTo( newFileName );
  -            }
  -            keyHash = keyHashTemp;
  -            keyFile.reset();
  -            saveKeys();
  +    }
  +    catch (Exception e)
  +    {
  +      log.error(fileName, e);
  +    }
  +  }
   
  -            // clean up the recycle store
  -            recycle = null;
  -            recycle = new SortedPreferentialArray( maxKeySize );
  +  /**
   +   * Copies the data for a key from the main file to the temp file and the
   +   * key to the temp keyHash.  Clients must manage locking.
   +   *
   +   * @param key Serializable
   +   * @param keyHashTemp LRUMap
  +   * @param dataFileTemp IndexedDisk
  +   */
  +  private void moveKeyDataToTemp(Serializable key, LRUMap keyHashTemp,
  +                                 IndexedDisk dataFileTemp) throws Exception
  +  {
   
  -        }
  -        catch ( Exception e )
  -        {
  -            log.error( fileName, e );
  -        }
  +    CacheElement tempDe = null;
  +    try
  +    {
  +      tempDe = readElement(key);
       }
  -
  -    /**
  -     * Returns the current cache size.
  -     *
  -     * @return The size value
  -     */
  -    public int getSize()
  +    catch (IOException e)
       {
  -        return keyHash.size();
   +      log.error("Failed to get original off disk cache: " + fileName
  +                + ", key: " + key + "; keyHash.tag = " + keyHash.tag);
  +      reset();
  +      throw e;
       }
   
  -    /**
  -     * For debugging.
  -     */
  -    public void dump()
  +    try
       {
  -        log.debug( "[dump] Number of keys: " + keyHash.size() );
  +      //IndexedDiskElementDescriptor de =
  +      //    dataFileTemp.appendObject( tempDe );
   
  -        Iterator itr = keyHash.entrySet().iterator();
  +      IndexedDiskElementDescriptor ded = new IndexedDiskElementDescriptor();
  +      byte[] data = IndexedDisk.serialize(tempDe);
  +      ded.init(dataFileTemp.length(), data);
  +      dataFileTemp.write(data, ded.pos);
   
  -        while ( itr.hasNext() )
  -        {
  -            Map.Entry e = ( Map.Entry ) itr.next();
  +      if (log.isDebugEnabled())
  +      {
   +        log.debug("Optimize: Put to temp disk cache: " + fileName +
  +                  ", key: " + key + ", ded.pos:" + ded.pos + ", ded.len:" +
  +                  ded.len);
  +      }
  +
  +      keyHashTemp.put(key, ded);
  +    }
  +    catch (Exception e)
  +    {
  +      log.error("Failed to put to temp disk cache: " + fileName
  +                + ", key: " + key, e);
  +    }
   
  -            Serializable key = ( Serializable ) e.getKey();
  +    if (log.isDebugEnabled())
  +    {
  +      log.debug(fileName
  +                + " -- keyHashTemp.size(): " + keyHashTemp.size()
  +                + ", keyHash.size(): " + keyHash.size());
  +    }
   
  -            IndexedDiskElementDescriptor ded =
  -                ( IndexedDiskElementDescriptor ) e.getValue();
  +  }
   
  -            Serializable val = get( key );
  +  /**
   +   * Replaces the current keyHash, data file, and recycle bin.
   +   * The temp file passed in must follow the Temp.data naming convention.
  +   *
  +   * @param keyHashTemp LRUMap
  +   * @param dataFileTemp IndexedDisk
  +   */
  +  private void tempToPrimary(LRUMap keyHashTemp, IndexedDisk dataFileTemp)
  +  {
   
  -            log.debug( "[dump] Disk element, key: " + key +
  -                       ", val: " + val +
  -                       ", pos: " + ded.pos );
  +    try
  +    {
  +      // Make dataFileTemp to become dataFile on disk.
  +      dataFileTemp.close();
  +      dataFile.close();
  +      File oldData = new File(rafDir, fileName + ".data");
  +      if (oldData.exists())
  +      {
  +        if (log.isInfoEnabled())
  +        {
  +          log.info(fileName + " -- oldData.length() = " +
  +                   oldData.length());
           }
  +        oldData.delete();
  +      }
  +      File newData = new File(rafDir, fileName + "Temp.data");
  +      File newFileName = new File(rafDir, fileName + ".data");
  +      if (newData.exists())
  +      {
  +        if (log.isInfoEnabled())
  +        {
  +          log.info(fileName + " -- newData.length() = " +
  +                   newData.length());
  +        }
  +
  +        newData.renameTo(newFileName);
  +      }
  +      dataFile =
  +          new IndexedDisk(newFileName);
  +
  +      keyHash = keyHashTemp;
  +      keyFile.reset();
  +      saveKeys();
  +
  +      // clean up the recycle store
  +      recycle = null;
  +      recycle = new SortedPreferentialArray(maxKeySize);
  +    }
  +    catch (Exception e)
  +    {
  +      log.error("Failed to put to temp disk cache", e);
       }
   
  -    /**
  -     * Inner class for recylcing and lru
  -     */
  -    public class LRUMap extends LRUMapJCS {
  +  }
   
  -      public LRUMap( int maxKeySize )
  -      {
  -        super( maxKeySize );
  -      }
  +///////////////////////////////////////////////////////////////////////////////
  +// DEBUG
  +  /**
  +   * Returns the current cache size.
  +   *
  +   * @return The size value
  +   */
  +  public int getSize()
  +  {
  +    return keyHash.size();
  +  }
  +
  +  /**
  +   * For debugging.
  +   */
  +  public void dump()
  +  {
  +    log.debug("[dump] Number of keys: " + keyHash.size());
   
  +    Iterator itr = keyHash.entrySet().iterator();
   
  -      protected void processRemovedLRU( Object key, Object value )
  -       {
  +    while (itr.hasNext())
  +    {
  +      Map.Entry e = (Map.Entry) itr.next();
   
  -         if ( doRecycle ) {
  -           // reuse the spot
  -           IndexedDiskElementDescriptor ded =
  -               (IndexedDiskElementDescriptor)value;
  -           if (ded != null)
  -           {
  -             recycle.add(ded);
  -             if (log.isInfoEnabled())
  -             {
  -               log.info("recycled ded in LRU" + ded);
  -             }
  -           }
  -         }
  +      Serializable key = (Serializable) e.getKey();
   
  +      IndexedDiskElementDescriptor ded =
  +          (IndexedDiskElementDescriptor) e.getValue();
   
  -         if ( log.isDebugEnabled() )
  -         {
  -           log.debug( "Removing key: '" + key + "' from key store." );
  -           log.debug( "Key store size: '" + this.size() + "'." );
  -         }
  +      Serializable val = get(key);
   
  -       }
  +      log.debug("[dump] Disk element, key: " + key +
  +                ", val: " + val +
  +                ", pos: " + ded.pos);
       }
  +  }
   
  +  /**
  +   * Gets basic stats for the disk cache.
  +   *
  +   * @return String
  +   */
  +  public String getStats()
  +  {
  +    StringBuffer buf = new StringBuffer();
  +    buf.append("\n -------------------------");
  +    buf.append("\n Indexed Disk Cache:");
  +    buf.append("\n Key Map Size = " + this.keyHash.size());
  +    try
  +    {
  +      buf.append("\n Data File Length = " + this.dataFile.length());
  +    }
  +    catch (Exception e)
  +    {
  +      log.error(e);
  +    }
   +    buf.append("\n Optimize Operation Count = " + this.optCnt);
  +    buf.append("\n Times Optimized = " + this.timesOptimized);
  +    buf.append("\n Recycle Count = " + this.recycleCnt);
  +
  +    buf.append( super.getStats() );
  +
  +    return buf.toString();
  +  }
  +
  +///////////////////////////////////////////////////////////////////////////////
   +// RECYCLE INNER CLASS
  +  /**
   +   * Inner class for recycling and LRU removal.
  +   */
  +  public class LRUMap
  +      extends LRUMapJCS
  +  {
   
  -}
  +    public String tag = "orig";
  +
  +    public LRUMap()
  +    {
  +      super();
  +    }
   
  +    public LRUMap(int maxKeySize)
  +    {
  +      super(maxKeySize);
  +    }
   
  +    protected void processRemovedLRU(Object key, Object value)
  +    {
   
  +      if (doRecycle)
  +      {
  +        // reuse the spot
  +        IndexedDiskElementDescriptor ded =
  +            (IndexedDiskElementDescriptor) value;
  +        if (ded != null)
  +        {
  +          recycle.add(ded);
  +          if (log.isDebugEnabled())
  +          {
   +            log.debug("recycled ded in LRU " + ded);
  +          }
  +        }
  +      }
  +
  +      if (log.isDebugEnabled())
  +      {
  +        log.debug("Removing key: '" + key + "' from key store.");
  +        log.debug("Key store size: '" + this.size() + "'.");
  +      }
   
  +    }
  +  }
   
  +}
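
  To recap the real time optimization flow implemented above, here is a
  simplified sketch of what optimizeRealTime() does, with locking and
  logging elided (the helper signatures are taken from the diff):

    // 1. snapshot the keys under the read lock
    Object[] keys = keyHash.keySet().toArray();

    // 2. migrate each element to a temp key map and temp data file,
    //    taking the write lock per key so gets stay responsive
    LRUMap keyHashTemp = new LRUMap( maxKeySize );
    IndexedDisk dataFileTemp =
        new IndexedDisk( new File( rafDir, fileName + "Temp.data" ) );
    for ( int i = 0; i < keys.length; i++ )
    {
      moveKeyDataToTemp( (Serializable) keys[i], keyHashTemp, dataFileTemp );
    }

    // 3. under the write lock, drain puts that arrived mid-optimization,
    //    then swap the temp key map and data file in as the primary
    while ( optimizingPutList.size() > 0 )
    {
      moveKeyDataToTemp( (Serializable) optimizingPutList.removeFirst(),
                         keyHashTemp, dataFileTemp );
    }
    tempToPrimary( keyHashTemp, dataFileTemp );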
  
  
  
  1.7       +21 -9     jakarta-turbine-jcs/src/java/org/apache/jcs/auxiliary/disk/indexed/IndexedDisk.java
  
  Index: IndexedDisk.java
  ===================================================================
  RCS file: /home/cvs/jakarta-turbine-jcs/src/java/org/apache/jcs/auxiliary/disk/indexed/IndexedDisk.java,v
  retrieving revision 1.6
  retrieving revision 1.7
  diff -u -r1.6 -r1.7
  --- IndexedDisk.java	12 Jun 2004 02:34:13 -0000	1.6
  +++ IndexedDisk.java	16 Jul 2004 01:26:34 -0000	1.7
  @@ -68,7 +68,7 @@
        * @return
        * @param pos
        */
  -    Serializable readObject( long pos )
  +    Serializable readObject( long pos ) throws IOException
       {
           byte[] data = null;
           boolean corrupted = false;
  @@ -76,22 +76,31 @@
           {
               synchronized ( this )
               {
  -                raf.seek( pos );
  -                int datalen = raf.readInt();
  -                if ( datalen > raf.length() )
  -                {
  -                    corrupted = true;
  +                if ( pos > raf.length() ) {
  +                  corrupted = true;
                   }
                   else
                   {
  -                    raf.readFully( data = new byte[datalen] );
  +                  raf.seek(pos);
  +                  int datalen = raf.readInt();
  +                  if (datalen > raf.length())
  +                  {
  +                    corrupted = true;
  +                  }
  +                  else
  +                  {
  +                    raf.readFully(data = new byte[datalen]);
  +                  }
                   }
               }
               if ( corrupted )
               {
  -                log.warn( "The dataFile is corrupted!" );
  +                log.warn( "\n The dataFile is corrupted!" +
  +                          "\n raf.length() = " + raf.length() +
  +                          "\n pos = " + pos );
                   //reset();
  -                return null;
   +                throw new IOException( "The data file is corrupt; a reset is needed" );
  +               // return null;
               }
               ByteArrayInputStream bais = new ByteArrayInputStream( data );
               BufferedInputStream bis = new BufferedInputStream( bais );
  @@ -108,6 +117,9 @@
           catch ( Exception e )
           {
               log.error( raf, e );
  +            if( e instanceof IOException ) {
  +              throw (IOException)e;
  +            }
           }
           return null;
       }
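
  The readObject() change above makes corruption detection fail fast. Each
  record in the data file is a four byte length prefix followed by the
  serialized element, so a position or length past the end of the file now
  raises an IOException instead of silently returning null. A sketch of the
  guarded read (same layout as the diff; the exception messages here are
  illustrative only):

    synchronized ( this )
    {
      if ( pos > raf.length() )
      {
        throw new IOException( "record position is past EOF" );
      }
      raf.seek( pos );
      int datalen = raf.readInt();  // four byte length prefix
      if ( datalen > raf.length() )
      {
        throw new IOException( "record length is past EOF" );
      }
      byte[] data = new byte[datalen];
      raf.readFully( data );        // the serialized element bytes
    }

  Callers like IndexedDiskCache.readElement() now propagate the exception,
  and moveKeyDataToTemp() resets the cache if it hits one mid-optimization.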
  
  
  

---------------------------------------------------------------------
To unsubscribe, e-mail: turbine-jcs-dev-unsubscribe@jakarta.apache.org
For additional commands, e-mail: turbine-jcs-dev-help@jakarta.apache.org