Posted to commits@commons.apache.org by gg...@apache.org on 2021/01/17 02:32:38 UTC

[commons-jcs] 01/02: Sort members.

This is an automated email from the ASF dual-hosted git repository.

ggregory pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/commons-jcs.git

commit 11cdf60a77abdb063c427d3c60ee24e0e6821798
Author: Gary Gregory <ga...@gmail.com>
AuthorDate: Sat Jan 16 21:23:29 2021 -0500

    Sort members.
---
 .../auxiliary/disk/block/BlockDiskKeyStore.java    | 494 ++++++++++-----------
 1 file changed, 247 insertions(+), 247 deletions(-)

diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java
index bb4519f..0b7bcfa 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java
@@ -52,6 +52,163 @@ import org.apache.commons.jcs3.utils.timing.ElapsedTimer;
  */
 public class BlockDiskKeyStore<K>
 {
+    /**
+     * Class for recycling and LRU. This implements the LRU overflow callback,
+     * so we can mark the blocks as free.
+     */
+    public class LRUMapCountLimited extends LRUMap<K, int[]>
+    {
+        /**
+         * <code>TAG</code> tells us which map we are working on.
+         */
+        public final static String TAG = "orig-lru-count";
+
+        public LRUMapCountLimited(final int maxKeySize)
+        {
+            super(maxKeySize);
+        }
+
+        /**
+         * This is called when the max key size is reached. The least recently
+         * used item will be passed here. We will store the position and size of
+         * the spot on disk in the recycle bin.
+         * <p>
+         *
+         * @param key
+         * @param value
+         */
+        @Override
+        protected void processRemovedLRU(final K key, final int[] value)
+        {
+            blockDiskCache.freeBlocks(value);
+            if (log.isDebugEnabled())
+            {
+                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
+                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
+            }
+        }
+    }
+
+    /**
+     * Class for recycling and LRU. This implements the LRU size overflow
+     * callback, so we can mark the blocks as free.
+     */
+    public class LRUMapSizeLimited extends AbstractLRUMap<K, int[]>
+    {
+        /**
+         * <code>TAG</code> tells us which map we are working on.
+         */
+        public final static String TAG = "orig-lru-size";
+
+        // size of the content in kB
+        private final AtomicInteger contentSize;
+        private final int maxSize;
+
+        /**
+         * Default
+         */
+        public LRUMapSizeLimited()
+        {
+            this(-1);
+        }
+
+        /**
+         * @param maxSize
+         *            maximum cache size in kB
+         */
+        public LRUMapSizeLimited(final int maxSize)
+        {
+            this.maxSize = maxSize;
+            this.contentSize = new AtomicInteger(0);
+        }
+
+        // keep the content size in kB, so 2^31 kB is a reasonable value
+        private void addLengthToCacheSize(final int[] value)
+        {
+            contentSize.addAndGet(value.length * blockSize / 1024 + 1);
+        }
+
+        /**
+         * This is called when the max key size is reached. The least recently
+         * used item will be passed here. We will store the position and size of
+         * the spot on disk in the recycle bin.
+         * <p>
+         *
+         * @param key
+         * @param value
+         */
+        @Override
+        protected void processRemovedLRU(final K key, final int[] value)
+        {
+            blockDiskCache.freeBlocks(value);
+            if (log.isDebugEnabled())
+            {
+                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
+                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
+            }
+
+            if (value != null)
+            {
+                subLengthFromCacheSize(value);
+            }
+        }
+
+        @Override
+        public int[] put(final K key, final int[] value)
+        {
+            int[] oldValue = null;
+
+            try
+            {
+                oldValue = super.put(key, value);
+            }
+            finally
+            {
+                if (value != null)
+                {
+                    addLengthToCacheSize(value);
+                }
+                if (oldValue != null)
+                {
+                    subLengthFromCacheSize(oldValue);
+                }
+            }
+
+            return oldValue;
+        }
+
+        @Override
+        public int[] remove(final Object key)
+        {
+            int[] value = null;
+
+            try
+            {
+                value = super.remove(key);
+                return value;
+            }
+            finally
+            {
+                if (value != null)
+                {
+                    subLengthFromCacheSize(value);
+                }
+            }
+        }
+
+        @Override
+        protected boolean shouldRemove()
+        {
+            return maxSize > 0 && contentSize.get() > maxSize && this.size() > 1;
+        }
+
+        // keep the content size in kB, so 2^31 kB is a reasonable value
+        private void subLengthFromCacheSize(final int[] value)
+        {
+            contentSize.addAndGet(value.length * blockSize / -1024 - 1);
+        }
+    }
+
     /** The logger */
     private static final Log log = LogManager.getLog(BlockDiskKeyStore.class);
 
@@ -126,71 +283,35 @@ public class BlockDiskKeyStore<K>
     }
 
     /**
-     * Saves the key file to disk. This gets the LRUMap entry set and writes the
-     * entries out one by one after putting them in a wrapper.
+     * This is mainly used for testing. It leaves the disk intact and just
+     * clears memory.
      */
-    protected void saveKeys()
+    protected void clearMemoryMap()
     {
-        try
-        {
-            final ElapsedTimer timer = new ElapsedTimer();
-            final int numKeys = keyHash.size();
-            log.info("{0}: Saving keys to [{1}], key count [{2}]", () -> logCacheName,
-                    () -> this.keyFile.getAbsolutePath(), () -> numKeys);
-
-            synchronized (keyFile)
-            {
-                final FileOutputStream fos = new FileOutputStream(keyFile);
-                final BufferedOutputStream bos = new BufferedOutputStream(fos, 65536);
-
-                try (ObjectOutputStream oos = new ObjectOutputStream(bos))
-                {
-                    if (!verify())
-                    {
-                        throw new IOException("Inconsistent key file");
-                    }
-                    // don't need to synchronize, since the underlying
-                    // collection makes a copy
-                    for (final Map.Entry<K, int[]> entry : keyHash.entrySet())
-                    {
-                        final BlockDiskElementDescriptor<K> descriptor = new BlockDiskElementDescriptor<>();
-                        descriptor.setKey(entry.getKey());
-                        descriptor.setBlocks(entry.getValue());
-                        // stream these out in the loop.
-                        oos.writeUnshared(descriptor);
-                    }
-                }
-            }
-
-            log.info("{0}: Finished saving keys. It took {1} to store {2} keys. Key file length [{3}]",
-                    () -> logCacheName, () -> timer.getElapsedTimeString(), () -> numKeys,
-                    () -> keyFile.length());
-        }
-        catch (final IOException e)
-        {
-            log.error("{0}: Problem storing keys.", logCacheName, e);
-        }
+        this.keyHash.clear();
     }
 
     /**
-     * Resets the file and creates a new key map.
+     * Gets the entry set.
+     * <p>
+     *
+     * @return entry set.
      */
-    protected void reset()
+    public Set<Map.Entry<K, int[]>> entrySet()
     {
-        synchronized (keyFile)
-        {
-            clearMemoryMap();
-            saveKeys();
-        }
+        return this.keyHash.entrySet();
     }
 
     /**
-     * This is mainly used for testing. It leaves the disk intact and just
-     * clears memory.
+     * Gets the object for the key.
+     * <p>
+     *
+     * @param key
+     * @return the int[] for the key, else null
      */
-    protected void clearMemoryMap()
+    public int[] get(final K key)
     {
-        this.keyHash.clear();
+        return this.keyHash.get(key);
     }
 
     /**
@@ -222,6 +343,17 @@ public class BlockDiskKeyStore<K>
     }
 
     /**
+     * Gets the key set.
+     * <p>
+     *
+     * @return key set.
+     */
+    public Set<K> keySet()
+    {
+        return this.keyHash.keySet();
+    }
+
+    /**
      * Loads the keys from the .key file. The keys are stored individually on
      * disk. They are added one by one to an LRUMap.
      */
@@ -278,72 +410,97 @@ public class BlockDiskKeyStore<K>
     }
 
     /**
-     * Gets the entry set.
+     * Puts an int[] in the keyStore.
      * <p>
      *
-     * @return entry set.
+     * @param key
+     * @param value
      */
-    public Set<Map.Entry<K, int[]>> entrySet()
+    public void put(final K key, final int[] value)
     {
-        return this.keyHash.entrySet();
+        this.keyHash.put(key, value);
     }
 
     /**
-     * Gets the key set.
+     * Removes by key.
      * <p>
      *
-     * @return key set.
+     * @param key
+     * @return the int[] if it was present, else null
      */
-    public Set<K> keySet()
+    public int[] remove(final K key)
     {
-        return this.keyHash.keySet();
+        return this.keyHash.remove(key);
     }
 
     /**
-     * Gets the size of the key hash.
-     * <p>
-     *
-     * @return the number of keys.
+     * Resets the file and creates a new key map.
      */
-    public int size()
+    protected void reset()
     {
-        return this.keyHash.size();
+        synchronized (keyFile)
+        {
+            clearMemoryMap();
+            saveKeys();
+        }
     }
 
     /**
-     * Gets the object for the key.
-     * <p>
-     *
-     * @param key
-     * @return the int[] for the key, else null
+     * Saves the key file to disk. This gets the LRUMap entry set and writes the
+     * entries out one by one after putting them in a wrapper.
      */
-    public int[] get(final K key)
+    protected void saveKeys()
     {
-        return this.keyHash.get(key);
-    }
+        try
+        {
+            final ElapsedTimer timer = new ElapsedTimer();
+            final int numKeys = keyHash.size();
+            log.info("{0}: Saving keys to [{1}], key count [{2}]", () -> logCacheName,
+                    () -> this.keyFile.getAbsolutePath(), () -> numKeys);
 
-    /**
-     * Puts an int[] in the keyStore.
-     * <p>
-     *
-     * @param key
-     * @param value
-     */
-    public void put(final K key, final int[] value)
-    {
-        this.keyHash.put(key, value);
+            synchronized (keyFile)
+            {
+                final FileOutputStream fos = new FileOutputStream(keyFile);
+                final BufferedOutputStream bos = new BufferedOutputStream(fos, 65536);
+
+                try (ObjectOutputStream oos = new ObjectOutputStream(bos))
+                {
+                    if (!verify())
+                    {
+                        throw new IOException("Inconsistent key file");
+                    }
+                    // don't need to synchronize, since the underlying
+                    // collection makes a copy
+                    for (final Map.Entry<K, int[]> entry : keyHash.entrySet())
+                    {
+                        final BlockDiskElementDescriptor<K> descriptor = new BlockDiskElementDescriptor<>();
+                        descriptor.setKey(entry.getKey());
+                        descriptor.setBlocks(entry.getValue());
+                        // stream these out in the loop.
+                        oos.writeUnshared(descriptor);
+                    }
+                }
+            }
+
+            log.info("{0}: Finished saving keys. It took {1} to store {2} keys. Key file length [{3}]",
+                    () -> logCacheName, () -> timer.getElapsedTimeString(), () -> numKeys,
+                    () -> keyFile.length());
+        }
+        catch (final IOException e)
+        {
+            log.error("{0}: Problem storing keys.", logCacheName, e);
+        }
     }
 
     /**
-     * Removes by key.
+     * Gets the size of the key hash.
      * <p>
      *
-     * @param key
-     * @return the int[] if it was present, else null
+     * @return the number of keys.
      */
-    public int[] remove(final K key)
+    public int size()
     {
-        return this.keyHash.remove(key);
+        return this.keyHash.size();
     }
 
     /**
@@ -390,161 +547,4 @@ public class BlockDiskKeyStore<K>
             return ok;
         }
     }
-
-    /**
-     * Class for recycling and LRU. This implements the LRU size overflow
-     * callback, so we can mark the blocks as free.
-     */
-    public class LRUMapSizeLimited extends AbstractLRUMap<K, int[]>
-    {
-        /**
-         * <code>TAG</code> tells us which map we are working on.
-         */
-        public final static String TAG = "orig-lru-size";
-
-        // size of the content in kB
-        private final AtomicInteger contentSize;
-        private final int maxSize;
-
-        /**
-         * Default
-         */
-        public LRUMapSizeLimited()
-        {
-            this(-1);
-        }
-
-        /**
-         * @param maxSize
-         *            maximum cache size in kB
-         */
-        public LRUMapSizeLimited(final int maxSize)
-        {
-            this.maxSize = maxSize;
-            this.contentSize = new AtomicInteger(0);
-        }
-
-        // keep the content size in kB, so 2^31 kB is a reasonable value
-        private void subLengthFromCacheSize(final int[] value)
-        {
-            contentSize.addAndGet(value.length * blockSize / -1024 - 1);
-        }
-
-        // keep the content size in kB, so 2^31 kB is a reasonable value
-        private void addLengthToCacheSize(final int[] value)
-        {
-            contentSize.addAndGet(value.length * blockSize / 1024 + 1);
-        }
-
-        @Override
-        public int[] put(final K key, final int[] value)
-        {
-            int[] oldValue = null;
-
-            try
-            {
-                oldValue = super.put(key, value);
-            }
-            finally
-            {
-                if (value != null)
-                {
-                    addLengthToCacheSize(value);
-                }
-                if (oldValue != null)
-                {
-                    subLengthFromCacheSize(oldValue);
-                }
-            }
-
-            return oldValue;
-        }
-
-        @Override
-        public int[] remove(final Object key)
-        {
-            int[] value = null;
-
-            try
-            {
-                value = super.remove(key);
-                return value;
-            }
-            finally
-            {
-                if (value != null)
-                {
-                    subLengthFromCacheSize(value);
-                }
-            }
-        }
-
-        /**
-         * This is called when the max key size is reached. The least recently
-         * used item will be passed here. We will store the position and size of
-         * the spot on disk in the recycle bin.
-         * <p>
-         *
-         * @param key
-         * @param value
-         */
-        @Override
-        protected void processRemovedLRU(final K key, final int[] value)
-        {
-            blockDiskCache.freeBlocks(value);
-            if (log.isDebugEnabled())
-            {
-                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
-                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
-            }
-
-            if (value != null)
-            {
-                subLengthFromCacheSize(value);
-            }
-        }
-
-        @Override
-        protected boolean shouldRemove()
-        {
-            return maxSize > 0 && contentSize.get() > maxSize && this.size() > 1;
-        }
-    }
-
-    /**
-     * Class for recycling and LRU. This implements the LRU overflow callback,
-     * so we can mark the blocks as free.
-     */
-    public class LRUMapCountLimited extends LRUMap<K, int[]>
-    {
-        /**
-         * <code>TAG</code> tells us which map we are working on.
-         */
-        public final static String TAG = "orig-lru-count";
-
-        public LRUMapCountLimited(final int maxKeySize)
-        {
-            super(maxKeySize);
-        }
-
-        /**
-         * This is called when the max key size is reached. The least recently
-         * used item will be passed here. We will store the position and size of
-         * the spot on disk in the recycle bin.
-         * <p>
-         *
-         * @param key
-         * @param value
-         */
-        @Override
-        protected void processRemovedLRU(final K key, final int[] value)
-        {
-            blockDiskCache.freeBlocks(value);
-            if (log.isDebugEnabled())
-            {
-                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
-                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
-            }
-        }
-    }
 }
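
For anyone skimming the relocated LRUMapSizeLimited above, the following
standalone sketch illustrates its kB bookkeeping. The class name and the
blockSize/maxSize values are invented for illustration; only the formulas
mirror addLengthToCacheSize, subLengthFromCacheSize, and shouldRemove from
the code in this diff.

    // Illustrative only: mirrors the kB accounting in LRUMapSizeLimited.
    // Each entry is charged "blocks.length * blockSize / 1024 + 1" kB, and
    // size-based eviction is allowed once the running total exceeds maxSize
    // (in kB) while more than one entry is present.
    public class SizeAccountingSketch
    {
        public static void main(final String[] args)
        {
            final int blockSize = 4096; // bytes per block (example value)
            final int maxSize = 10;     // maximum cache size in kB (example value)
            int contentSizeKb = 0;      // running total, kept in kB

            final int[][] entries = { new int[1], new int[2], new int[1] };
            int count = 0;
            for (final int[] blocks : entries)
            {
                // Same formula as addLengthToCacheSize(int[]):
                contentSizeKb += blocks.length * blockSize / 1024 + 1;
                count++;
                // Same condition as shouldRemove():
                final boolean shouldRemove = maxSize > 0 && contentSizeKb > maxSize && count > 1;
                System.out.println("entries=" + count + ", contentSize=" + contentSizeKb
                        + " kB, shouldRemove=" + shouldRemove);
            }
            // Removing an entry reverses the charge, as in subLengthFromCacheSize(int[]):
            contentSizeKb += entries[0].length * blockSize / -1024 - 1;
            System.out.println("after one removal, contentSize=" + contentSizeKb + " kB");
        }
    }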