Posted to commits@commons.apache.org by gg...@apache.org on 2022/08/30 14:41:35 UTC

[commons-jcs] branch master updated: We don't use author tag, CVS/SVN tags

This is an automated email from the ASF dual-hosted git repository.

ggregory pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/commons-jcs.git


The following commit(s) were added to refs/heads/master by this push:
     new fabc3242 We don't use author tag, CVS/SVN tags
fabc3242 is described below

commit fabc324298156e32be5937a0c1cc935bc7b35d17
Author: Gary Gregory <gg...@rocketsoftware.com>
AuthorDate: Tue Aug 30 10:41:27 2022 -0400

    We don't use author tag, CVS/SVN tags
    
    - Move Apache license text to the top of some files
    - Remove useless paragraph tags
    - Close some paragraph tags
    - Remove some whitespace
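
For context, the Javadoc cleanup listed above takes the following shape. This is a
minimal before/after sketch based on the BlockDisk.java hunk further down; only the
author tag line changes:

    // Before: class Javadoc carries a CVS-era author tag.
    /**
     * This class manages reading and writing data to disk.
     * <p>
     * @author Aaron Smuts
     */

    // After: the same Javadoc with the author tag dropped.
    /**
     * This class manages reading and writing data to disk.
     */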
---
 .../jcs3/auxiliary/disk/block/BlockDisk.java       | 1032 +++++-----
 .../jcs3/auxiliary/disk/block/BlockDiskCache.java  | 1408 +++++++-------
 .../disk/block/BlockDiskCacheAttributes.java       |  234 ++-
 .../disk/block/BlockDiskElementDescriptor.java     |  304 ++-
 .../auxiliary/disk/block/BlockDiskKeyStore.java    | 1211 ++++++------
 .../jcs3/auxiliary/disk/jdbc/JDBCDiskCache.java    | 1615 ++++++++--------
 .../disk/jdbc/JDBCDiskCacheAttributes.java         |  660 ++++---
 .../auxiliary/disk/jdbc/JDBCDiskCacheFactory.java  |  526 +++---
 .../jcs3/auxiliary/disk/jdbc/ShrinkerThread.java   |  294 ++-
 .../jcs3/auxiliary/disk/jdbc/TableState.java       |  226 ++-
 .../disk/jdbc/dsfactory/DataSourceFactory.java     |    4 -
 .../disk/jdbc/dsfactory/JndiDataSourceFactory.java |  343 ++--
 .../dsfactory/SharedPoolDataSourceFactory.java     |    3 -
 .../disk/jdbc/hsql/HSQLDiskCacheFactory.java       |  256 ++-
 .../auxiliary/disk/jdbc/mysql/MySQLDiskCache.java  |  309 ++-
 .../disk/jdbc/mysql/MySQLDiskCacheAttributes.java  |  212 ++-
 .../disk/jdbc/mysql/MySQLDiskCacheFactory.java     |  314 ++--
 .../disk/jdbc/mysql/MySQLTableOptimizer.java       |  573 +++---
 .../disk/jdbc/mysql/util/ScheduleParser.java       |  184 +-
 .../tcp/behavior/ITCPLateralCacheAttributes.java   |  496 +++--
 .../remote/behavior/IRemoteCacheClient.java        |  121 +-
 .../remote/server/RemoteCacheStartupServlet.java   |  574 +++---
 .../remote/server/behavior/IRemoteCacheServer.java |   73 +-
 .../jcs3/engine/AbstractCacheEventQueue.java       |  854 +++++----
 .../jcs3/engine/CacheEventQueueFactory.java        |  168 +-
 .../engine/behavior/ICacheElementSerialized.java   |   82 +-
 .../engine/behavior/ICompositeCacheManager.java    |  126 +-
 .../jcs3/engine/behavior/IElementSerializer.java   |  566 +++---
 .../jcs3/engine/behavior/IProvideScheduler.java    |   73 +-
 .../jcs3/engine/behavior/IRequireScheduler.java    |   75 +-
 .../jcs3/engine/behavior/IShutdownObservable.java  |  109 +-
 .../jcs3/engine/behavior/IShutdownObserver.java    |   80 +-
 .../memory/soft/SoftReferenceMemoryCache.java      |  479 +++--
 .../apache/commons/jcs3/engine/stats/Stats.java    |  197 +-
 .../commons/jcs3/engine/stats/behavior/IStats.java |  123 +-
 .../jcs3/utils/access/AbstractJCSWorkerHelper.java |  116 +-
 .../commons/jcs3/utils/access/JCSWorker.java       |  571 +++---
 .../commons/jcs3/utils/access/JCSWorkerHelper.java |  119 +-
 .../jcs3/utils/discovery/DiscoveredService.java    |  378 ++--
 .../jcs3/utils/discovery/UDPCleanupRunner.java     |    3 +-
 .../utils/discovery/UDPDiscoveryAttributes.java    |  537 +++---
 .../jcs3/utils/discovery/UDPDiscoveryManager.java  |  278 ++-
 .../jcs3/utils/discovery/UDPDiscoverySender.java   |  642 ++++---
 .../jcs3/utils/discovery/UDPDiscoveryService.java  | 1046 +++++------
 .../commons/jcs3/utils/net/HostNameUtil.java       |  438 +++--
 .../serialization/SerializationConversionUtil.java |  262 ++-
 .../utils/serialization/StandardSerializer.java    |  164 +-
 .../utils/servlet/JCSServletContextListener.java   |  145 +-
 .../commons/jcs3/utils/struct/AbstractLRUMap.java  |    5 +-
 .../apache/commons/jcs3/utils/struct/LRUMap.java   |  109 +-
 .../jcs3/utils/threadpool/DaemonThreadFactory.java |  144 +-
 .../jcs3/utils/threadpool/PoolConfiguration.java   |  584 +++---
 .../jcs3/utils/threadpool/ThreadPoolManager.java   |  669 +++----
 .../jcs3/JCSCacheElementRetrievalUnitTest.java     |  104 +-
 .../jcs3/JCSConcurrentCacheAccessUnitTest.java     |  361 ++--
 .../apache/commons/jcs3/ZeroSizeCacheUnitTest.java |  179 +-
 .../commons/jcs3/access/CacheAccessUnitTest.java   |  730 ++++----
 .../jcs3/access/GroupCacheAccessUnitTest.java      |  478 +++--
 .../jcs3/access/SystemPropertyUnitTest.java        |  175 +-
 .../commons/jcs3/admin/AdminBeanUnitTest.java      |  345 ++--
 .../commons/jcs3/admin/CountingStreamUnitTest.java |  155 +-
 .../commons/jcs3/auxiliary/MockAuxiliaryCache.java |  428 +++--
 .../disk/block/BlockDiskCacheKeyStoreUnitTest.java |  380 ++--
 .../disk/block/BlockDiskCacheSteadyLoadTest.java   |  320 ++--
 .../auxiliary/disk/block/BlockDiskUnitTest.java    |  734 ++++----
 .../block/HugeQuantityBlockDiskCacheLoadTest.java  |  271 ++-
 .../auxiliary/disk/indexed/DiskTestObjectUtil.java |  288 ++-
 .../indexed/HugeQuantityIndDiskCacheLoadTest.java  |  248 ++-
 .../indexed/IndexDiskCacheUnitTestAbstract.java    | 1967 ++++++++++----------
 .../indexed/IndexedDiskCacheKeyStoreUnitTest.java  |  297 ++-
 .../IndexedDiskCacheOptimizationUnitTest.java      |  186 +-
 .../indexed/IndexedDiskCacheSteadyLoadTest.java    |  306 ++-
 .../auxiliary/disk/indexed/LRUMapSizeVsCount.java  |  423 +++--
 .../disk/jdbc/JDBCDiskCacheSharedPoolUnitTest.java |  282 ++-
 .../disk/jdbc/JDBCDiskCacheShrinkUnitTest.java     |  442 +++--
 .../auxiliary/disk/jdbc/JDBCDiskCacheUnitTest.java |  410 ++--
 .../mysql/MySQLDiskCacheHsqlBackedUnitTest.java    |  355 ++--
 .../disk/jdbc/mysql/MySQLDiskCacheUnitTest.java    |  140 +-
 .../mysql/util/ScheduleParserUtilUnitTest.java     |  256 ++-
 .../tcp/LateralTCPConcurrentRandomTestUtil.java    |  397 ++--
 .../LateralTCPFilterRemoveHashCodeUnitTest.java    |  391 ++--
 .../tcp/LateralTCPIssueRemoveOnPutUnitTest.java    |  445 +++--
 .../lateral/socket/tcp/TestTCPLateralUnitTest.java |  662 ++++---
 .../auxiliary/remote/MockRemoteCacheClient.java    |  520 +++---
 .../auxiliary/remote/MockRemoteCacheListener.java  |  334 ++--
 .../remote/RemoteCacheListenerUnitTest.java        |  246 ++-
 .../remote/RemoteCacheNoWaitUnitTest.java          |  420 +++--
 .../jcs3/auxiliary/remote/RemoteUtilsUnitTest.java |  129 +-
 .../jcs3/auxiliary/remote/TestRemoteCache.java     |  279 ++-
 .../BasicRemoteCacheClientServerUnitTest.java      |  675 ++++---
 .../RemoteCacheServerAttributesUnitTest.java       |  128 +-
 .../server/RemoteCacheServerStartupUtil.java       |  222 ++-
 .../remote/server/RemoteCacheServerUnitTest.java   |  928 +++++----
 .../jcs3/engine/EventQueueConcurrentLoadTest.java  |  694 ++++---
 .../engine/control/CacheManagerStatsUnitTest.java  |  142 +-
 .../control/CompositeCacheDiskUsageUnitTest.java   |  980 +++++-----
 .../engine/control/CompositeCacheUnitTest.java     |  470 +++--
 .../control/event/ElementEventHandlerMockImpl.java |  371 ++--
 .../jcs3/engine/memory/MockMemoryCache.java        |  508 +++--
 .../memory/lru/LHMLRUMemoryCacheUnitTest.java      |  624 ++++---
 .../engine/memory/mru/MRUMemoryCacheUnitTest.java  |  624 ++++---
 .../memory/shrinking/ShrinkerThreadUnitTest.java   |  670 ++++---
 .../soft/SoftReferenceMemoryCacheUnitTest.java     |  490 +++--
 .../jcs3/utils/access/JCSWorkerUnitTest.java       |  139 +-
 .../jcs3/utils/config/PropertySetterUnitTest.java  |  119 +-
 .../SerializationConversionUtilUnitTest.java       |  390 ++--
 .../serialization/StandardSerializerUnitTest.java  |  206 +-
 .../jcs3/utils/struct/LRUMapPerformanceTest.java   |  349 ++--
 .../commons/jcs3/utils/struct/LRUMapUnitTest.java  |  265 ++-
 .../threadpool/ThreadPoolManagerUnitTest.java      |  170 +-
 .../commons/jcs/access/PartitionedCacheAccess.java | 1722 ++++++++---------
 .../jcs/yajcache/beans/CacheChangeEvent.java       |   84 +-
 .../jcs/yajcache/beans/CacheChangeSupport.java     |  150 +-
 .../jcs/yajcache/beans/CacheClearEvent.java        |   74 +-
 .../jcs/yajcache/beans/CachePutBeanCloneEvent.java |   78 +-
 .../jcs/yajcache/beans/CachePutBeanCopyEvent.java  |   78 +-
 .../jcs/yajcache/beans/CachePutCopyEvent.java      |   78 +-
 .../commons/jcs/yajcache/beans/CachePutEvent.java  |  102 +-
 .../jcs/yajcache/beans/CacheRemoveEvent.java       |   88 +-
 .../jcs/yajcache/beans/ICacheChangeHandler.java    |   82 +-
 .../jcs/yajcache/beans/ICacheChangeListener.java   |   64 +-
 .../jcs/yajcache/config/PerCacheConfig.java        |   83 +-
 .../jcs/yajcache/config/YajCacheConfig.java        |   97 +-
 .../commons/jcs/yajcache/core/CacheEntry.java      |  120 +-
 .../commons/jcs/yajcache/core/CacheManager.java    |  676 ++++---
 .../commons/jcs/yajcache/core/CacheType.java       |  188 +-
 .../apache/commons/jcs/yajcache/core/ICache.java   |  102 +-
 .../commons/jcs/yajcache/core/ICacheSafe.java      |  266 ++-
 .../jcs/yajcache/core/SafeCacheWrapper.java        |  328 ++--
 .../jcs/yajcache/file/CacheFileContent.java        |  314 ++--
 .../yajcache/file/CacheFileContentCorrupted.java   |  150 +-
 .../jcs/yajcache/file/CacheFileContentType.java    |  172 +-
 .../commons/jcs/yajcache/file/CacheFileDAO.java    |  322 ++--
 .../commons/jcs/yajcache/file/CacheFileUtils.java  |  204 +-
 .../yajcache/lang/annotation/CopyRightApache.java  |   70 +-
 .../yajcache/lang/annotation/CopyRightType.java    |  102 +-
 .../jcs/yajcache/lang/annotation/Immutable.java    |   88 +-
 .../jcs/yajcache/lang/annotation/Implements.java   |   60 +-
 .../jcs/yajcache/lang/annotation/JavaBean.java     |   90 +-
 .../jcs/yajcache/lang/annotation/NonNullable.java  |   88 +-
 .../commons/jcs/yajcache/lang/annotation/TODO.java |   76 +-
 .../jcs/yajcache/lang/annotation/TestOnly.java     |   74 +-
 .../jcs/yajcache/lang/annotation/ThreadSafety.java |   78 +-
 .../yajcache/lang/annotation/ThreadSafetyType.java |  134 +-
 .../lang/annotation/UnsupportedOperation.java      |   76 +-
 .../apache/commons/jcs/yajcache/lang/ref/IKey.java |   62 +-
 .../jcs/yajcache/lang/ref/KeyedRefCollector.java   |  160 +-
 .../jcs/yajcache/lang/ref/KeyedSoftReference.java  |  140 +-
 .../jcs/yajcache/lang/ref/KeyedWeakReference.java  |  136 +-
 .../commons/jcs/yajcache/soft/SoftRefCache.java    |  552 +++---
 .../jcs/yajcache/soft/SoftRefFileCache.java        | 1106 ++++++-----
 .../commons/jcs/yajcache/util/BeanUtils.java       |  152 +-
 .../commons/jcs/yajcache/util/ClassUtils.java      |  112 +-
 .../commons/jcs/yajcache/util/CollectionUtils.java |   98 +-
 .../commons/jcs/yajcache/util/EqualsUtils.java     |  155 +-
 .../commons/jcs/yajcache/util/SerializeUtils.java  |  149 +-
 .../util/concurrent/locks/IKeyedReadWriteLock.java |   66 +-
 .../util/concurrent/locks/KeyedReadWriteLock.java  |  332 ++--
 .../jcs/yajcache/core/CacheManagerTest.java        |  232 ++-
 .../jcs/yajcache/core/SafeCacheManagerTest.java    |  368 ++--
 .../jcs/yajcache/file/CacheFileDAOTest.java        |  282 ++-
 .../jcs/yajcache/file/FileContentTypeTest.java     |  108 +-
 .../lang/annotation/CopyRightApacheTest.java       |   76 +-
 .../jcs/yajcache/soft/SoftRefCacheTest.java        |  154 +-
 .../yajcache/soft/SoftRefFileCacheSafeTest.java    |  156 +-
 .../jcs/yajcache/util/SerializeUtilsTest.java      |  182 +-
 .../jcs/yajcache/util/TestSerializable.java        |  116 +-
 .../http/broadcast/LateralCacheMulticaster.java    |  165 +-
 .../lateral/http/server/DeleteCacheServlet.java    |  373 ++--
 .../http/server/LateralCacheServletReciever.java   |  382 ++--
 .../lateral/socket/udp/LateralUDPSender.java       |  459 +++--
 .../jcs/engine/memory/arc/ARCMemoryCache.java      | 1602 ++++++++--------
 .../engine/memory/arc/ARCMemoryCacheUnitTest.java  |  576 +++---
 173 files changed, 29012 insertions(+), 29358 deletions(-)

diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDisk.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDisk.java
index 123ba388..564dca3f 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDisk.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDisk.java
@@ -1,517 +1,515 @@
-package org.apache.commons.jcs3.auxiliary.disk.block;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.file.StandardOpenOption;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-import org.apache.commons.jcs3.utils.serialization.StandardSerializer;
-
-/**
- * This class manages reading and writing data to disk. When asked to write a value, it returns a
- * block array. It can read an object from the block numbers in a byte array.
- * <p>
- * @author Aaron Smuts
- */
-public class BlockDisk implements AutoCloseable
-{
-    /** The logger */
-    private static final Log log = LogManager.getLog(BlockDisk.class);
-
-    /** The size of the header that indicates the amount of data stored in an occupied block. */
-    public static final byte HEADER_SIZE_BYTES = 4;
-    // N.B. 4 bytes is the size used for ByteBuffer.putInt(int value) and ByteBuffer.getInt()
-
-    /** defaults to 4kb */
-    private static final int DEFAULT_BLOCK_SIZE_BYTES = 4 * 1024;
-
-    /** Size of the blocks */
-    private final int blockSizeBytes;
-
-    /**
-     * the total number of blocks that have been used. If there are no free blocks, we will use this to
-     * calculate the position of the next block.
-     */
-    private final AtomicInteger numberOfBlocks = new AtomicInteger(0);
-
-    /** Empty blocks that can be reused. */
-    private final ConcurrentLinkedQueue<Integer> emptyBlocks = new ConcurrentLinkedQueue<>();
-
-    /** The serializer. */
-    private final IElementSerializer elementSerializer;
-
-    /** Location of the spot on disk */
-    private final String filepath;
-
-    /** File channel for multiple concurrent reads and writes */
-    private final FileChannel fc;
-
-    /** How many bytes have we put to disk */
-    private final AtomicLong putBytes = new AtomicLong();
-
-    /** How many items have we put to disk */
-    private final AtomicLong putCount = new AtomicLong();
-
-    /**
-     * Constructor for the Disk object
-     * <p>
-     * @param file
-     * @param elementSerializer
-     * @throws IOException
-     */
-    public BlockDisk(final File file, final IElementSerializer elementSerializer)
-        throws IOException
-    {
-        this(file, DEFAULT_BLOCK_SIZE_BYTES, elementSerializer);
-    }
-
-    /**
-     * Creates the file and sets the block size in bytes.
-     * <p>
-     * @param file
-     * @param blockSizeBytes
-     * @throws IOException
-     */
-    public BlockDisk(final File file, final int blockSizeBytes)
-        throws IOException
-    {
-        this(file, blockSizeBytes, new StandardSerializer());
-    }
-
-    /**
-     * Creates the file and sets the block size in bytes.
-     * <p>
-     * @param file
-     * @param blockSizeBytes
-     * @param elementSerializer
-     * @throws IOException
-     */
-    public BlockDisk(final File file, final int blockSizeBytes, final IElementSerializer elementSerializer)
-        throws IOException
-    {
-        this.filepath = file.getAbsolutePath();
-        this.fc = FileChannel.open(file.toPath(),
-                StandardOpenOption.CREATE,
-                StandardOpenOption.READ,
-                StandardOpenOption.WRITE);
-        this.numberOfBlocks.set((int) Math.ceil(1f * this.fc.size() / blockSizeBytes));
-
-        log.info("Constructing BlockDisk, blockSizeBytes [{0}]", blockSizeBytes);
-
-        this.blockSizeBytes = blockSizeBytes;
-        this.elementSerializer = elementSerializer;
-    }
-
-    /**
-     * Allocate a given number of blocks from the available set
-     *
-     * @param numBlocksNeeded
-     * @return an array of allocated blocks
-     */
-    private int[] allocateBlocks(final int numBlocksNeeded)
-    {
-        assert numBlocksNeeded >= 1;
-
-        final int[] blocks = new int[numBlocksNeeded];
-        // get them from the empty list or take the next one
-        for (int i = 0; i < numBlocksNeeded; i++)
-        {
-            Integer emptyBlock = emptyBlocks.poll();
-            if (emptyBlock == null)
-            {
-                emptyBlock = Integer.valueOf(numberOfBlocks.getAndIncrement());
-            }
-            blocks[i] = emptyBlock.intValue();
-        }
-
-        return blocks;
-    }
-
-    /**
-     * This writes an object to disk and returns the blocks it was stored in.
-     * <p>
-     * The program flow is as follows:
-     * <ol>
-     * <li>Serialize the object.</li>
-     * <li>Determine the number of blocks needed.</li>
-     * <li>Look for free blocks in the emptyBlock list.</li>
-     * <li>If there were not enough in the empty list, take the nextBlock and increment it.</li>
-     * <li>If the data will not fit in one block, create sub arrays.</li>
-     * <li>Write the subarrays to disk.</li>
-     * <li>If the process fails we should decrement the block count if we took from it.</li>
-     * </ol>
-     * @param object
-     * @return the blocks we used.
-     * @throws IOException
-     */
-    protected <T> int[] write(final T object)
-        throws IOException
-    {
-        // serialize the object
-        final byte[] data = elementSerializer.serialize(object);
-
-        log.debug("write, total pre-chunking data.length = {0}", data.length);
-
-        this.putBytes.addAndGet(data.length);
-        this.putCount.incrementAndGet();
-
-        // figure out how many blocks we need.
-        final int numBlocksNeeded = calculateTheNumberOfBlocksNeeded(data);
-
-        log.debug("numBlocksNeeded = {0}", numBlocksNeeded);
-
-        // allocate blocks
-        final int[] blocks = allocateBlocks(numBlocksNeeded);
-
-        int offset = 0;
-        final int maxChunkSize = blockSizeBytes - HEADER_SIZE_BYTES;
-        final ByteBuffer headerBuffer = ByteBuffer.allocate(HEADER_SIZE_BYTES);
-        final ByteBuffer dataBuffer = ByteBuffer.wrap(data);
-
-        for (int i = 0; i < numBlocksNeeded; i++)
-        {
-            headerBuffer.clear();
-            final int length = Math.min(maxChunkSize, data.length - offset);
-            headerBuffer.putInt(length);
-            headerBuffer.flip();
-
-            dataBuffer.position(offset).limit(offset + length);
-            final ByteBuffer slice = dataBuffer.slice();
-
-            final long position = calculateByteOffsetForBlockAsLong(blocks[i]);
-            // write the header
-            int written = fc.write(headerBuffer, position);
-            assert written == HEADER_SIZE_BYTES;
-
-            //write the data
-            written = fc.write(slice, position + HEADER_SIZE_BYTES);
-            assert written == length;
-
-            offset += length;
-        }
-
-        //fc.force(false);
-
-        return blocks;
-    }
-
-    /**
-     * Return the amount to put in each block. Fill them all the way, minus the header.
-     * <p>
-     * @param complete
-     * @param numBlocksNeeded
-     * @return byte[][]
-     */
-    protected byte[][] getBlockChunks(final byte[] complete, final int numBlocksNeeded)
-    {
-        final byte[][] chunks = new byte[numBlocksNeeded][];
-
-        if (numBlocksNeeded == 1)
-        {
-            chunks[0] = complete;
-        }
-        else
-        {
-            final int maxChunkSize = this.blockSizeBytes - HEADER_SIZE_BYTES;
-            final int totalBytes = complete.length;
-            int totalUsed = 0;
-            for (short i = 0; i < numBlocksNeeded; i++)
-            {
-                // use the max that can be written to a block or whatever is left in the original
-                // array
-                final int chunkSize = Math.min(maxChunkSize, totalBytes - totalUsed);
-                final byte[] chunk = new byte[chunkSize];
-                // copy from the used position to the chunk size on the complete array to the chunk
-                // array.
-                System.arraycopy(complete, totalUsed, chunk, 0, chunkSize);
-                chunks[i] = chunk;
-                totalUsed += chunkSize;
-            }
-        }
-
-        return chunks;
-    }
-
-    /**
-     * Reads an object that is located in the specified blocks.
-     * <p>
-     * @param blockNumbers
-     * @return the object instance
-     * @throws IOException
-     * @throws ClassNotFoundException
-     */
-    protected <T> T read(final int[] blockNumbers)
-        throws IOException, ClassNotFoundException
-    {
-        final ByteBuffer data;
-
-        if (blockNumbers.length == 1)
-        {
-            data = readBlock(blockNumbers[0]);
-        }
-        else
-        {
-            data = ByteBuffer.allocate(blockNumbers.length * getBlockSizeBytes());
-            // get all the blocks into data
-            for (short i = 0; i < blockNumbers.length; i++)
-            {
-                final ByteBuffer chunk = readBlock(blockNumbers[i]);
-                data.put(chunk);
-            }
-
-            data.flip();
-        }
-
-        log.debug("read, total post combination data.length = {0}", () -> data.limit());
-
-        return elementSerializer.deSerialize(data.array(), null);
-    }
-
-    /**
-     * This reads the occupied data in a block.
-     * <p>
-     * The first four bytes of the record should tell us how long it is. The data is read into a
-     * byte array and then an object is constructed from the byte array.
-     * <p>
-     * @return byte[]
-     * @param block
-     * @throws IOException
-     */
-    private ByteBuffer readBlock(final int block)
-        throws IOException
-    {
-        int datalen = 0;
-
-        String message = null;
-        boolean corrupted = false;
-        final long fileLength = fc.size();
-
-        final long position = calculateByteOffsetForBlockAsLong(block);
-//        if (position > fileLength)
-//        {
-//            corrupted = true;
-//            message = "Record " + position + " starts past EOF.";
-//        }
-//        else
-        {
-            final ByteBuffer datalength = ByteBuffer.allocate(HEADER_SIZE_BYTES);
-            fc.read(datalength, position);
-            datalength.flip();
-            datalen = datalength.getInt();
-            if (position + datalen > fileLength)
-            {
-                corrupted = true;
-                message = "Record " + position + " exceeds file length.";
-            }
-        }
-
-        if (corrupted)
-        {
-            log.warn("\n The file is corrupt: \n {0}", message);
-            throw new IOException("The File Is Corrupt, need to reset");
-        }
-
-        final ByteBuffer data = ByteBuffer.allocate(datalen);
-        fc.read(data, position + HEADER_SIZE_BYTES);
-        data.flip();
-
-        return data;
-    }
-
-    /**
-     * Add these blocks to the emptyBlock list.
-     * <p>
-     * @param blocksToFree
-     */
-    protected void freeBlocks(final int[] blocksToFree)
-    {
-        if (blocksToFree != null)
-        {
-            for (short i = 0; i < blocksToFree.length; i++)
-            {
-                emptyBlocks.offer(Integer.valueOf(blocksToFree[i]));
-            }
-        }
-    }
-
-    /**
-     * Calculates the file offset for a particular block.
-     * <p>
-     * @param block number
-     * @return the byte offset for this block in the file as a long
-     * @since 2.0
-     */
-    protected long calculateByteOffsetForBlockAsLong(final int block)
-    {
-        return (long) block * blockSizeBytes;
-    }
-
-    /**
-     * The number of blocks needed.
-     * <p>
-     * @param data
-     * @return the number of blocks needed to store the byte array
-     */
-    protected int calculateTheNumberOfBlocksNeeded(final byte[] data)
-    {
-        final int dataLength = data.length;
-
-        final int oneBlock = blockSizeBytes - HEADER_SIZE_BYTES;
-
-        // takes care of 0 = HEADER_SIZE_BYTES + blockSizeBytes
-        if (dataLength <= oneBlock)
-        {
-            return 1;
-        }
-
-        int dividend = dataLength / oneBlock;
-
-        if (dataLength % oneBlock != 0)
-        {
-            dividend++;
-        }
-        return dividend;
-    }
-
-    /**
-     * Returns the file length.
-     * <p>
-     * @return the size of the file.
-     * @throws IOException
-     */
-    protected long length()
-        throws IOException
-    {
-        return fc.size();
-    }
-
-    /**
-     * Closes the file.
-     * <p>
-     * @throws IOException
-     */
-    @Override
-    public void close()
-        throws IOException
-    {
-        this.numberOfBlocks.set(0);
-        this.emptyBlocks.clear();
-        fc.close();
-    }
-
-    /**
-     * Resets the file.
-     * <p>
-     * @throws IOException
-     */
-    protected synchronized void reset()
-        throws IOException
-    {
-        this.numberOfBlocks.set(0);
-        this.emptyBlocks.clear();
-        fc.truncate(0);
-        fc.force(true);
-    }
-
-    /**
-     * @return Returns the numberOfBlocks.
-     */
-    protected int getNumberOfBlocks()
-    {
-        return numberOfBlocks.get();
-    }
-
-    /**
-     * @return Returns the blockSizeBytes.
-     */
-    protected int getBlockSizeBytes()
-    {
-        return blockSizeBytes;
-    }
-
-    /**
-     * @return Returns the average size of an element inserted.
-     */
-    protected long getAveragePutSizeBytes()
-    {
-        final long count = this.putCount.get();
-
-        if (count == 0)
-        {
-            return 0;
-        }
-        return this.putBytes.get() / count;
-    }
-
-    /**
-     * @return Returns the number of empty blocks.
-     */
-    protected int getEmptyBlocks()
-    {
-        return this.emptyBlocks.size();
-    }
-
-    /**
-     * For debugging only.
-     * <p>
-     * @return String with details.
-     */
-    @Override
-    public String toString()
-    {
-        final StringBuilder buf = new StringBuilder();
-        buf.append("\nBlock Disk ");
-        buf.append("\n  Filepath [" + filepath + "]");
-        buf.append("\n  NumberOfBlocks [" + this.numberOfBlocks.get() + "]");
-        buf.append("\n  BlockSizeBytes [" + this.blockSizeBytes + "]");
-        buf.append("\n  Put Bytes [" + this.putBytes + "]");
-        buf.append("\n  Put Count [" + this.putCount + "]");
-        buf.append("\n  Average Size [" + getAveragePutSizeBytes() + "]");
-        buf.append("\n  Empty Blocks [" + this.getEmptyBlocks() + "]");
-        try
-        {
-            buf.append("\n  Length [" + length() + "]");
-        }
-        catch (final IOException e)
-        {
-            // swallow
-        }
-        return buf.toString();
-    }
-
-    /**
-     * This is used for debugging.
-     * <p>
-     * @return the file path.
-     */
-    protected String getFilePath()
-    {
-        return filepath;
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.block;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.StandardOpenOption;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+import org.apache.commons.jcs3.utils.serialization.StandardSerializer;
+
+/**
+ * This class manages reading and writing data to disk. When asked to write a value, it returns a
+ * block array. It can read an object from the block numbers in a byte array.
+ */
+public class BlockDisk implements AutoCloseable
+{
+    /** The logger */
+    private static final Log log = LogManager.getLog(BlockDisk.class);
+
+    /** The size of the header that indicates the amount of data stored in an occupied block. */
+    public static final byte HEADER_SIZE_BYTES = 4;
+    // N.B. 4 bytes is the size used for ByteBuffer.putInt(int value) and ByteBuffer.getInt()
+
+    /** defaults to 4kb */
+    private static final int DEFAULT_BLOCK_SIZE_BYTES = 4 * 1024;
+
+    /** Size of the blocks */
+    private final int blockSizeBytes;
+
+    /**
+     * the total number of blocks that have been used. If there are no free blocks, we will use this to
+     * calculate the position of the next block.
+     */
+    private final AtomicInteger numberOfBlocks = new AtomicInteger(0);
+
+    /** Empty blocks that can be reused. */
+    private final ConcurrentLinkedQueue<Integer> emptyBlocks = new ConcurrentLinkedQueue<>();
+
+    /** The serializer. */
+    private final IElementSerializer elementSerializer;
+
+    /** Location of the spot on disk */
+    private final String filepath;
+
+    /** File channel for multiple concurrent reads and writes */
+    private final FileChannel fc;
+
+    /** How many bytes have we put to disk */
+    private final AtomicLong putBytes = new AtomicLong();
+
+    /** How many items have we put to disk */
+    private final AtomicLong putCount = new AtomicLong();
+
+    /**
+     * Constructor for the Disk object
+     * <p>
+     * @param file
+     * @param elementSerializer
+     * @throws IOException
+     */
+    public BlockDisk(final File file, final IElementSerializer elementSerializer)
+        throws IOException
+    {
+        this(file, DEFAULT_BLOCK_SIZE_BYTES, elementSerializer);
+    }
+
+    /**
+     * Creates the file and sets the block size in bytes.
+     * <p>
+     * @param file
+     * @param blockSizeBytes
+     * @throws IOException
+     */
+    public BlockDisk(final File file, final int blockSizeBytes)
+        throws IOException
+    {
+        this(file, blockSizeBytes, new StandardSerializer());
+    }
+
+    /**
+     * Creates the file and sets the block size in bytes.
+     * <p>
+     * @param file
+     * @param blockSizeBytes
+     * @param elementSerializer
+     * @throws IOException
+     */
+    public BlockDisk(final File file, final int blockSizeBytes, final IElementSerializer elementSerializer)
+        throws IOException
+    {
+        this.filepath = file.getAbsolutePath();
+        this.fc = FileChannel.open(file.toPath(),
+                StandardOpenOption.CREATE,
+                StandardOpenOption.READ,
+                StandardOpenOption.WRITE);
+        this.numberOfBlocks.set((int) Math.ceil(1f * this.fc.size() / blockSizeBytes));
+
+        log.info("Constructing BlockDisk, blockSizeBytes [{0}]", blockSizeBytes);
+
+        this.blockSizeBytes = blockSizeBytes;
+        this.elementSerializer = elementSerializer;
+    }
+
+    /**
+     * Allocate a given number of blocks from the available set
+     *
+     * @param numBlocksNeeded
+     * @return an array of allocated blocks
+     */
+    private int[] allocateBlocks(final int numBlocksNeeded)
+    {
+        assert numBlocksNeeded >= 1;
+
+        final int[] blocks = new int[numBlocksNeeded];
+        // get them from the empty list or take the next one
+        for (int i = 0; i < numBlocksNeeded; i++)
+        {
+            Integer emptyBlock = emptyBlocks.poll();
+            if (emptyBlock == null)
+            {
+                emptyBlock = Integer.valueOf(numberOfBlocks.getAndIncrement());
+            }
+            blocks[i] = emptyBlock.intValue();
+        }
+
+        return blocks;
+    }
+
+    /**
+     * This writes an object to disk and returns the blocks it was stored in.
+     * <p>
+     * The program flow is as follows:
+     * <ol>
+     * <li>Serialize the object.</li>
+     * <li>Determine the number of blocks needed.</li>
+     * <li>Look for free blocks in the emptyBlock list.</li>
+     * <li>If there were not enough in the empty list, take the nextBlock and increment it.</li>
+     * <li>If the data will not fit in one block, create sub arrays.</li>
+     * <li>Write the subarrays to disk.</li>
+     * <li>If the process fails we should decrement the block count if we took from it.</li>
+     * </ol>
+     * @param object
+     * @return the blocks we used.
+     * @throws IOException
+     */
+    protected <T> int[] write(final T object)
+        throws IOException
+    {
+        // serialize the object
+        final byte[] data = elementSerializer.serialize(object);
+
+        log.debug("write, total pre-chunking data.length = {0}", data.length);
+
+        this.putBytes.addAndGet(data.length);
+        this.putCount.incrementAndGet();
+
+        // figure out how many blocks we need.
+        final int numBlocksNeeded = calculateTheNumberOfBlocksNeeded(data);
+
+        log.debug("numBlocksNeeded = {0}", numBlocksNeeded);
+
+        // allocate blocks
+        final int[] blocks = allocateBlocks(numBlocksNeeded);
+
+        int offset = 0;
+        final int maxChunkSize = blockSizeBytes - HEADER_SIZE_BYTES;
+        final ByteBuffer headerBuffer = ByteBuffer.allocate(HEADER_SIZE_BYTES);
+        final ByteBuffer dataBuffer = ByteBuffer.wrap(data);
+
+        for (int i = 0; i < numBlocksNeeded; i++)
+        {
+            headerBuffer.clear();
+            final int length = Math.min(maxChunkSize, data.length - offset);
+            headerBuffer.putInt(length);
+            headerBuffer.flip();
+
+            dataBuffer.position(offset).limit(offset + length);
+            final ByteBuffer slice = dataBuffer.slice();
+
+            final long position = calculateByteOffsetForBlockAsLong(blocks[i]);
+            // write the header
+            int written = fc.write(headerBuffer, position);
+            assert written == HEADER_SIZE_BYTES;
+
+            //write the data
+            written = fc.write(slice, position + HEADER_SIZE_BYTES);
+            assert written == length;
+
+            offset += length;
+        }
+
+        //fc.force(false);
+
+        return blocks;
+    }
+
+    /**
+     * Return the amount to put in each block. Fill them all the way, minus the header.
+     * <p>
+     * @param complete
+     * @param numBlocksNeeded
+     * @return byte[][]
+     */
+    protected byte[][] getBlockChunks(final byte[] complete, final int numBlocksNeeded)
+    {
+        final byte[][] chunks = new byte[numBlocksNeeded][];
+
+        if (numBlocksNeeded == 1)
+        {
+            chunks[0] = complete;
+        }
+        else
+        {
+            final int maxChunkSize = this.blockSizeBytes - HEADER_SIZE_BYTES;
+            final int totalBytes = complete.length;
+            int totalUsed = 0;
+            for (short i = 0; i < numBlocksNeeded; i++)
+            {
+                // use the max that can be written to a block or whatever is left in the original
+                // array
+                final int chunkSize = Math.min(maxChunkSize, totalBytes - totalUsed);
+                final byte[] chunk = new byte[chunkSize];
+                // copy from the used position to the chunk size on the complete array to the chunk
+                // array.
+                System.arraycopy(complete, totalUsed, chunk, 0, chunkSize);
+                chunks[i] = chunk;
+                totalUsed += chunkSize;
+            }
+        }
+
+        return chunks;
+    }
+
+    /**
+     * Reads an object that is located in the specified blocks.
+     * <p>
+     * @param blockNumbers
+     * @return the object instance
+     * @throws IOException
+     * @throws ClassNotFoundException
+     */
+    protected <T> T read(final int[] blockNumbers)
+        throws IOException, ClassNotFoundException
+    {
+        final ByteBuffer data;
+
+        if (blockNumbers.length == 1)
+        {
+            data = readBlock(blockNumbers[0]);
+        }
+        else
+        {
+            data = ByteBuffer.allocate(blockNumbers.length * getBlockSizeBytes());
+            // get all the blocks into data
+            for (short i = 0; i < blockNumbers.length; i++)
+            {
+                final ByteBuffer chunk = readBlock(blockNumbers[i]);
+                data.put(chunk);
+            }
+
+            data.flip();
+        }
+
+        log.debug("read, total post combination data.length = {0}", () -> data.limit());
+
+        return elementSerializer.deSerialize(data.array(), null);
+    }
+
+    /**
+     * This reads the occupied data in a block.
+     * <p>
+     * The first four bytes of the record should tell us how long it is. The data is read into a
+     * byte array and then an object is constructed from the byte array.
+     * <p>
+     * @return byte[]
+     * @param block
+     * @throws IOException
+     */
+    private ByteBuffer readBlock(final int block)
+        throws IOException
+    {
+        int datalen = 0;
+
+        String message = null;
+        boolean corrupted = false;
+        final long fileLength = fc.size();
+
+        final long position = calculateByteOffsetForBlockAsLong(block);
+//        if (position > fileLength)
+//        {
+//            corrupted = true;
+//            message = "Record " + position + " starts past EOF.";
+//        }
+//        else
+        {
+            final ByteBuffer datalength = ByteBuffer.allocate(HEADER_SIZE_BYTES);
+            fc.read(datalength, position);
+            datalength.flip();
+            datalen = datalength.getInt();
+            if (position + datalen > fileLength)
+            {
+                corrupted = true;
+                message = "Record " + position + " exceeds file length.";
+            }
+        }
+
+        if (corrupted)
+        {
+            log.warn("\n The file is corrupt: \n {0}", message);
+            throw new IOException("The File Is Corrupt, need to reset");
+        }
+
+        final ByteBuffer data = ByteBuffer.allocate(datalen);
+        fc.read(data, position + HEADER_SIZE_BYTES);
+        data.flip();
+
+        return data;
+    }
+
+    /**
+     * Add these blocks to the emptyBlock list.
+     * <p>
+     * @param blocksToFree
+     */
+    protected void freeBlocks(final int[] blocksToFree)
+    {
+        if (blocksToFree != null)
+        {
+            for (short i = 0; i < blocksToFree.length; i++)
+            {
+                emptyBlocks.offer(Integer.valueOf(blocksToFree[i]));
+            }
+        }
+    }
+
+    /**
+     * Calculates the file offset for a particular block.
+     * <p>
+     * @param block number
+     * @return the byte offset for this block in the file as a long
+     * @since 2.0
+     */
+    protected long calculateByteOffsetForBlockAsLong(final int block)
+    {
+        return (long) block * blockSizeBytes;
+    }
+
+    /**
+     * The number of blocks needed.
+     * <p>
+     * @param data
+     * @return the number of blocks needed to store the byte array
+     */
+    protected int calculateTheNumberOfBlocksNeeded(final byte[] data)
+    {
+        final int dataLength = data.length;
+
+        final int oneBlock = blockSizeBytes - HEADER_SIZE_BYTES;
+
+        // takes care of 0 = HEADER_SIZE_BYTES + blockSizeBytes
+        if (dataLength <= oneBlock)
+        {
+            return 1;
+        }
+
+        int dividend = dataLength / oneBlock;
+
+        if (dataLength % oneBlock != 0)
+        {
+            dividend++;
+        }
+        return dividend;
+    }
+
+    /**
+     * Returns the file length.
+     * <p>
+     * @return the size of the file.
+     * @throws IOException
+     */
+    protected long length()
+        throws IOException
+    {
+        return fc.size();
+    }
+
+    /**
+     * Closes the file.
+     * <p>
+     * @throws IOException
+     */
+    @Override
+    public void close()
+        throws IOException
+    {
+        this.numberOfBlocks.set(0);
+        this.emptyBlocks.clear();
+        fc.close();
+    }
+
+    /**
+     * Resets the file.
+     * <p>
+     * @throws IOException
+     */
+    protected synchronized void reset()
+        throws IOException
+    {
+        this.numberOfBlocks.set(0);
+        this.emptyBlocks.clear();
+        fc.truncate(0);
+        fc.force(true);
+    }
+
+    /**
+     * @return Returns the numberOfBlocks.
+     */
+    protected int getNumberOfBlocks()
+    {
+        return numberOfBlocks.get();
+    }
+
+    /**
+     * @return Returns the blockSizeBytes.
+     */
+    protected int getBlockSizeBytes()
+    {
+        return blockSizeBytes;
+    }
+
+    /**
+     * @return Returns the average size of an element inserted.
+     */
+    protected long getAveragePutSizeBytes()
+    {
+        final long count = this.putCount.get();
+
+        if (count == 0)
+        {
+            return 0;
+        }
+        return this.putBytes.get() / count;
+    }
+
+    /**
+     * @return Returns the number of empty blocks.
+     */
+    protected int getEmptyBlocks()
+    {
+        return this.emptyBlocks.size();
+    }
+
+    /**
+     * For debugging only.
+     * <p>
+     * @return String with details.
+     */
+    @Override
+    public String toString()
+    {
+        final StringBuilder buf = new StringBuilder();
+        buf.append("\nBlock Disk ");
+        buf.append("\n  Filepath [" + filepath + "]");
+        buf.append("\n  NumberOfBlocks [" + this.numberOfBlocks.get() + "]");
+        buf.append("\n  BlockSizeBytes [" + this.blockSizeBytes + "]");
+        buf.append("\n  Put Bytes [" + this.putBytes + "]");
+        buf.append("\n  Put Count [" + this.putCount + "]");
+        buf.append("\n  Average Size [" + getAveragePutSizeBytes() + "]");
+        buf.append("\n  Empty Blocks [" + this.getEmptyBlocks() + "]");
+        try
+        {
+            buf.append("\n  Length [" + length() + "]");
+        }
+        catch (final IOException e)
+        {
+            // swallow
+        }
+        return buf.toString();
+    }
+
+    /**
+     * This is used for debugging.
+     * <p>
+     * @return the file path.
+     */
+    protected String getFilePath()
+    {
+        return filepath;
+    }
+}
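
The hunk above only reorders headers and drops tags, so BlockDisk's behavior is
untouched. For orientation, here is a minimal usage sketch. It is not part of the
commit; the file path and payload are made up, and the sketch sits in the same
package because write(), read(), and freeBlocks() are protected (BlockDiskCache is
the real caller):

    package org.apache.commons.jcs3.auxiliary.disk.block;

    import java.io.File;

    public class BlockDiskSketch
    {
        public static void main(final String[] args) throws Exception
        {
            // Hypothetical backing file; BlockDisk creates it if absent.
            try (BlockDisk disk = new BlockDisk(new File("/tmp/sketch.data"), 4 * 1024))
            {
                // write() serializes the value and returns the block numbers used.
                // Each block holds blockSizeBytes - HEADER_SIZE_BYTES payload bytes,
                // so with 4 KB blocks a 10,000-byte value needs
                // ceil(10000 / 4092) = 3 blocks.
                final int[] blocks = disk.write("some cached value");

                // read() stitches the chunks back together and deserializes them.
                final String value = disk.read(blocks);
                System.out.println(value);

                // freeBlocks() queues the block numbers for reuse by later writes.
                disk.freeBlocks(blocks);
            }
        }
    }
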
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCache.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCache.java
index 1fff8a41..a0809833 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCache.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCache.java
@@ -1,705 +1,703 @@
-package org.apache.commons.jcs3.auxiliary.disk.block;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
-import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache;
-import org.apache.commons.jcs3.engine.behavior.ICacheElement;
-import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
-import org.apache.commons.jcs3.engine.behavior.IRequireScheduler;
-import org.apache.commons.jcs3.engine.control.group.GroupAttrName;
-import org.apache.commons.jcs3.engine.control.group.GroupId;
-import org.apache.commons.jcs3.engine.stats.StatElement;
-import org.apache.commons.jcs3.engine.stats.Stats;
-import org.apache.commons.jcs3.engine.stats.behavior.IStatElement;
-import org.apache.commons.jcs3.engine.stats.behavior.IStats;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-import org.apache.commons.jcs3.utils.serialization.StandardSerializer;
-
-/**
- * There is one BlockDiskCache per region. It manages the key and data store.
- * <p>
- * @author Aaron Smuts
- */
-public class BlockDiskCache<K, V>
-    extends AbstractDiskCache<K, V>
-    implements IRequireScheduler
-{
-    /** The logger. */
-    private static final Log log = LogManager.getLog( BlockDiskCache.class );
-
-    /** The name to prefix all log messages with. */
-    private final String logCacheName;
-
-    /** The name of the file to store data. */
-    private final String fileName;
-
-    /** The data access object */
-    private BlockDisk dataFile;
-
-    /** Attributes governing the behavior of the block disk cache. */
-    private final BlockDiskCacheAttributes blockDiskCacheAttributes;
-
-    /** The root directory for keys and data. */
-    private final File rootDirectory;
-
-    /** Stores, loads, and persists the keys */
-    private BlockDiskKeyStore<K> keyStore;
-
-    /**
-     * Use this lock to synchronize reads and writes to the underlying storage mechanism. We don't
-     * need a reentrant lock, since we only lock one level.
-     */
-    private final ReentrantReadWriteLock storageLock = new ReentrantReadWriteLock();
-
-    private ScheduledFuture<?> future;
-
-    /**
-     * Constructs the BlockDisk after setting up the root directory.
-     * <p>
-     * @param cacheAttributes
-     */
-    public BlockDiskCache( final BlockDiskCacheAttributes cacheAttributes )
-    {
-        this( cacheAttributes, new StandardSerializer() );
-    }
-
-    /**
-     * Constructs the BlockDisk after setting up the root directory.
-     * <p>
-     * @param cacheAttributes
-     * @param elementSerializer used if supplied; the super's super will not set a null
-     */
-    public BlockDiskCache( final BlockDiskCacheAttributes cacheAttributes, final IElementSerializer elementSerializer )
-    {
-        super( cacheAttributes );
-        setElementSerializer( elementSerializer );
-
-        this.blockDiskCacheAttributes = cacheAttributes;
-        this.logCacheName = "Region [" + getCacheName() + "] ";
-
-        log.info("{0}: Constructing BlockDiskCache with attributes {1}", logCacheName, cacheAttributes );
-
-        // Make a clean file name
-        this.fileName = getCacheName().replaceAll("[^a-zA-Z0-9-_\\.]", "_");
-        this.rootDirectory = cacheAttributes.getDiskPath();
-
-        log.info("{0}: Cache file root directory: [{1}]", logCacheName, rootDirectory);
-
-        try
-        {
-            if ( this.blockDiskCacheAttributes.getBlockSizeBytes() > 0 )
-            {
-                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
-                                               this.blockDiskCacheAttributes.getBlockSizeBytes(),
-                                               getElementSerializer() );
-            }
-            else
-            {
-                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
-                                               getElementSerializer() );
-            }
-
-            keyStore = new BlockDiskKeyStore<>( this.blockDiskCacheAttributes, this );
-
-            final boolean alright = verifyDisk();
-
-            if ( keyStore.isEmpty() || !alright )
-            {
-                this.reset();
-            }
-
-            // Initialization finished successfully, so set alive to true.
-            setAlive(true);
-            log.info("{0}: Block Disk Cache is alive.", logCacheName);
-        }
-        catch ( final IOException e )
-        {
-            log.error("{0}: Failure initializing for fileName: {1} and root directory: {2}",
-                    logCacheName, fileName, rootDirectory, e);
-        }
-    }
-
-    /**
-     * @see org.apache.commons.jcs3.engine.behavior.IRequireScheduler#setScheduledExecutorService(java.util.concurrent.ScheduledExecutorService)
-     */
-    @Override
-    public void setScheduledExecutorService(final ScheduledExecutorService scheduledExecutor)
-    {
-        // add this region to the persistence thread.
-        // TODO we might need to stagger this a bit.
-        if ( this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds() > 0 )
-        {
-            future = scheduledExecutor.scheduleAtFixedRate(keyStore::saveKeys,
-                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
-                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
-                    TimeUnit.SECONDS);
-        }
-    }
-
-    /**
-     * We need to verify that the file on disk uses the same block size and that the file is the
-     * proper size.
-     * <p>
-     * @return true if it looks ok
-     */
-    protected boolean verifyDisk()
-    {
-        boolean alright = false;
-        // simply try to read a few. If it works, then the file is probably ok.
-        // TODO add more.
-
-        storageLock.readLock().lock();
-
-        try
-        {
-            this.keyStore.entrySet().stream()
-                .limit(100)
-                .forEach(entry -> {
-                    try
-                    {
-                        final Object data = this.dataFile.read(entry.getValue());
-                        if ( data == null )
-                        {
-                            throw new IOException("Data is null");
-                        }
-                    }
-                    catch (final IOException | ClassNotFoundException e)
-                    {
-                        throw new RuntimeException(logCacheName
-                                + " Couldn't find data for key [" + entry.getKey() + "]", e);
-                    }
-                });
-            alright = true;
-        }
-        catch ( final Exception e )
-        {
-            log.warn("{0}: Problem verifying disk.", logCacheName, e);
-            alright = false;
-        }
-        finally
-        {
-            storageLock.readLock().unlock();
-        }
-
-        return alright;
-    }
-
-    /**
-     * Return the keys in this cache.
-     * <p>
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#getKeySet()
-     */
-    @Override
-    public Set<K> getKeySet() throws IOException
-    {
-        final HashSet<K> keys = new HashSet<>();
-
-        storageLock.readLock().lock();
-
-        try
-        {
-            keys.addAll(this.keyStore.keySet());
-        }
-        finally
-        {
-            storageLock.readLock().unlock();
-        }
-
-        return keys;
-    }
-
-    /**
-     * Gets matching items from the cache.
-     * <p>
-     * @param pattern
-     * @return a map of K key to ICacheElement&lt;K, V&gt; element, or an empty map if there is no
-     *         data in the cache matching the keys
-     */
-    @Override
-    public Map<K, ICacheElement<K, V>> processGetMatching( final String pattern )
-    {
-        Set<K> keyArray = null;
-        storageLock.readLock().lock();
-        try
-        {
-            keyArray = new HashSet<>(keyStore.keySet());
-        }
-        finally
-        {
-            storageLock.readLock().unlock();
-        }
-
-        final Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray( pattern, keyArray );
-
-        return matchingKeys.stream()
-            .collect(Collectors.toMap(
-                    key -> key,
-                    this::processGet)).entrySet().stream()
-                .filter(entry -> entry.getValue() != null)
-                .collect(Collectors.toMap(
-                        Entry::getKey,
-                        Entry::getValue));
-    }
-
-    /**
-     * Returns the number of keys.
-     * <p>
-     * (non-Javadoc)
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#getSize()
-     */
-    @Override
-    public int getSize()
-    {
-        return this.keyStore.size();
-    }
-
-    /**
-     * Gets the ICacheElement&lt;K, V&gt; for the key if it is in the cache. The program flow is as follows:
-     * <ol>
-     * <li>Make sure the disk cache is alive.</li> <li>Get a read lock.</li> <li>See if the key is
-     * in the key store.</li> <li>If we found a key, ask the BlockDisk for the object at the
-     * blocks.</li> <li>Release the lock.</li>
-     * </ol>
-     * @param key
-     * @return ICacheElement
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#get(Object)
-     */
-    @Override
-    protected ICacheElement<K, V> processGet( final K key )
-    {
-        if ( !isAlive() )
-        {
-            log.debug("{0}: No longer alive so returning null for key = {1}", logCacheName, key );
-            return null;
-        }
-
-        log.debug("{0}: Trying to get from disk: {1}", logCacheName, key );
-
-        ICacheElement<K, V> object = null;
-
-
-        try
-        {
-            storageLock.readLock().lock();
-            try {
-                final int[] ded = this.keyStore.get( key );
-                if ( ded != null )
-                {
-                    object = this.dataFile.read( ded );
-                }
-            } finally {
-                storageLock.readLock().unlock();
-            }
-
-        }
-        catch ( final IOException ioe )
-        {
-            log.error("{0}: Failure getting from disk--IOException, key = {1}", logCacheName, key, ioe );
-            reset();
-        }
-        catch ( final Exception e )
-        {
-            log.error("{0}: Failure getting from disk, key = {1}", logCacheName, key, e );
-        }
-        return object;
-    }
-
-    /**
-     * Writes an element to disk. The program flow is as follows:
-     * <ol>
-     * <li>Acquire write lock.</li> <li>See if an item exists for this key.</li> <li>If an item
-     * already exists, add its blocks to the remove list.</li> <li>Have the Block disk write the
-     * item.</li> <li>Create a descriptor and add it to the key map.</li> <li>Release the write
-     * lock.</li>
-     * </ol>
-     * @param element
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#update(ICacheElement)
-     */
-    @Override
-    protected void processUpdate( final ICacheElement<K, V> element )
-    {
-        if ( !isAlive() )
-        {
-            log.debug("{0}: No longer alive; aborting put of key = {1}",
-                    () -> logCacheName, element::getKey);
-            return;
-        }
-
-        int[] old = null;
-
-        // make sure this only locks for one particular cache region
-        storageLock.writeLock().lock();
-
-        try
-        {
-            old = this.keyStore.get( element.getKey() );
-
-            if ( old != null )
-            {
-                this.dataFile.freeBlocks( old );
-            }
-
-            final int[] blocks = this.dataFile.write( element );
-
-            this.keyStore.put( element.getKey(), blocks );
-
-            log.debug("{0}: Put to file [{1}] key [{2}]", () -> logCacheName,
-                    () -> fileName, element::getKey);
-        }
-        catch ( final IOException e )
-        {
-            log.error("{0}: Failure updating element, key: {1} old: {2}",
-                    logCacheName, element.getKey(), Arrays.toString(old), e);
-        }
-        finally
-        {
-            storageLock.writeLock().unlock();
-        }
-
-        log.debug("{0}: Storing element on disk, key: {1}", () -> logCacheName,
-                element::getKey);
-    }
-
-    /**
-     * Returns true if the removal was successful, or false if there was nothing to remove. The
-     * current implementation always results in a disk orphan.
-     * <p>
-     * @param key
-     * @return true if removed anything
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#remove(Object)
-     */
-    @Override
-    protected boolean processRemove( final K key )
-    {
-        if ( !isAlive() )
-        {
-            log.debug("{0}: No longer alive so returning false for key = {1}", logCacheName, key );
-            return false;
-        }
-
-        boolean reset = false;
-        boolean removed = false;
-
-        storageLock.writeLock().lock();
-
-        try
-        {
-            if (key instanceof String && key.toString().endsWith(NAME_COMPONENT_DELIMITER))
-            {
-                removed = performPartialKeyRemoval((String) key);
-            }
-            else if (key instanceof GroupAttrName && ((GroupAttrName<?>) key).attrName == null)
-            {
-                removed = performGroupRemoval(((GroupAttrName<?>) key).groupId);
-            }
-            else
-            {
-                removed = performSingleKeyRemoval(key);
-            }
-        }
-        catch ( final Exception e )
-        {
-            log.error("{0}: Problem removing element.", logCacheName, e );
-            reset = true;
-        }
-        finally
-        {
-            storageLock.writeLock().unlock();
-        }
-
-        if ( reset )
-        {
-            reset();
-        }
-
-        return removed;
-    }
-
-    /**
-     * Remove all elements from the group. This does not use the iterator to remove. It builds a
-     * list of group elements and then removes them one by one.
-     * <p>
-     * This operates under a lock obtained in doRemove().
-     * <p>
-     *
-     * @param key
-     * @return true if an element was removed
-     */
-    private boolean performGroupRemoval(final GroupId key)
-    {
-        // remove all keys of the same name group.
-        final List<K> itemsToRemove = keyStore.keySet()
-                .stream()
-                .filter(k -> k instanceof GroupAttrName && ((GroupAttrName<?>) k).groupId.equals(key))
-                .collect(Collectors.toList());
-
-        // remove matches.
-        // Don't add to recycle bin here
-        // https://issues.apache.org/jira/browse/JCS-67
-        itemsToRemove.forEach(this::performSingleKeyRemoval);
-        // TODO this needs to update the remove count separately
-
-        return !itemsToRemove.isEmpty();
-    }
-
-    /**
-     * Iterates over the keyset. Builds a list of matches. Removes all the keys in the list. Does
-     * not remove via the iterator, since the map impl may not support it.
-     * <p>
-     * This operates under a lock obtained in doRemove().
-     * <p>
-     *
-     * @param key
-     * @return true if there was a match
-     */
-    private boolean performPartialKeyRemoval(final String key)
-    {
-        // remove all keys of the same name hierarchy.
-        final List<K> itemsToRemove = keyStore.keySet()
-                .stream()
-                .filter(k -> k instanceof String && k.toString().startsWith(key))
-                .collect(Collectors.toList());
-
-        // remove matches.
-        // Don't add to recycle bin here
-        // https://issues.apache.org/jira/browse/JCS-67
-        itemsToRemove.forEach(this::performSingleKeyRemoval);
-        // TODO this needs to update the remove count separately
-
-        return !itemsToRemove.isEmpty();
-    }
-
-    private boolean performSingleKeyRemoval(final K key)
-    {
-        // remove single item.
-        final int[] ded = this.keyStore.remove( key );
-        final boolean removed = ded != null;
-        if ( removed )
-        {
-            this.dataFile.freeBlocks( ded );
-        }
-
-        log.debug("{0}: Disk removal: Removed from key hash, key [{1}] removed = {2}",
-                logCacheName, key, removed);
-        return removed;
-    }
-
-    /**
-     * Resets the keyfile, the disk file, and the memory key map.
-     * <p>
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#removeAll()
-     */
-    @Override
-    protected void processRemoveAll()
-    {
-        reset();
-    }
-
-    /**
-     * Dispose of the disk cache in a background thread. Joins against this thread to put a cap on
-     * the disposal time.
-     * <p>
-     * TODO make dispose window configurable.
-     */
-    @Override
-    public void processDispose()
-    {
-        final Thread t = new Thread(this::disposeInternal, "BlockDiskCache-DisposalThread" );
-        t.start();
-        // wait up to 60 seconds for dispose and then quit if not done.
-        try
-        {
-            t.join( 60 * 1000 );
-        }
-        catch ( final InterruptedException ex )
-        {
-            log.error("{0}: Interrupted while waiting for disposal thread to finish.",
-                    logCacheName, ex );
-        }
-    }
-
-    /**
-     * Internal method that handles the disposal.
-     */
-    protected void disposeInternal()
-    {
-        if ( !isAlive() )
-        {
-            log.error("{0}: Not alive and dispose was called, filename: {1}", logCacheName, fileName);
-            return;
-        }
-        storageLock.writeLock().lock();
-        try
-        {
-            // Prevents any interaction with the cache while we're shutting down.
-            setAlive(false);
-            this.keyStore.saveKeys();
-
-            if (future != null)
-            {
-                future.cancel(true);
-            }
-
-            try
-            {
-                log.debug("{0}: Closing files, base filename: {1}", logCacheName, fileName );
-                dataFile.close();
-                // dataFile = null;
-            }
-            catch ( final IOException e )
-            {
-                log.error("{0}: Failure closing files in dispose, filename: {1}",
-                        logCacheName, fileName, e );
-            }
-        }
-        finally
-        {
-            storageLock.writeLock().unlock();
-        }
-
-        log.info("{0}: Shutdown complete.", logCacheName);
-    }
-
-    /**
-     * Returns the attributes.
-     * <p>
-     * @see org.apache.commons.jcs3.auxiliary.AuxiliaryCache#getAuxiliaryCacheAttributes()
-     */
-    @Override
-    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
-    {
-        return this.blockDiskCacheAttributes;
-    }
-
-    /**
-     * Reset effectively clears the disk cache, creating new files, recycle bins, and keymaps.
-     * <p>
-     * It can be used as a last resort to handle errors, to force a content update, or to remove all items.
-     */
-    private void reset()
-    {
-        log.info("{0}: Resetting cache", logCacheName);
-
-        try
-        {
-            storageLock.writeLock().lock();
-
-            this.keyStore.reset();
-
-            if ( dataFile != null )
-            {
-                dataFile.reset();
-            }
-        }
-        catch ( final IOException e )
-        {
-            log.error("{0}: Failure resetting state", logCacheName, e );
-        }
-        finally
-        {
-            storageLock.writeLock().unlock();
-        }
-    }
-
-    /**
-     * Add these blocks to the emptyBlock list.
-     * <p>
-     * @param blocksToFree
-     */
-    protected void freeBlocks( final int[] blocksToFree )
-    {
-        this.dataFile.freeBlocks( blocksToFree );
-    }
-
-    /**
-     * Returns info about the disk cache.
-     * <p>
-     * @see org.apache.commons.jcs3.auxiliary.AuxiliaryCache#getStatistics()
-     */
-    @Override
-    public IStats getStatistics()
-    {
-        final IStats stats = new Stats();
-        stats.setTypeName( "Block Disk Cache" );
-
-        final ArrayList<IStatElement<?>> elems = new ArrayList<>();
-
-        elems.add(new StatElement<>( "Is Alive", Boolean.valueOf(isAlive()) ) );
-        elems.add(new StatElement<>( "Key Map Size", Integer.valueOf(this.keyStore.size()) ) );
-
-        if (this.dataFile != null)
-        {
-            try
-            {
-                elems.add(new StatElement<>( "Data File Length", Long.valueOf(this.dataFile.length()) ) );
-            }
-            catch ( final IOException e )
-            {
-                log.error( e );
-            }
-
-            elems.add(new StatElement<>( "Block Size Bytes",
-                    Integer.valueOf(this.dataFile.getBlockSizeBytes()) ) );
-            elems.add(new StatElement<>( "Number Of Blocks",
-                    Integer.valueOf(this.dataFile.getNumberOfBlocks()) ) );
-            elems.add(new StatElement<>( "Average Put Size Bytes",
-                    Long.valueOf(this.dataFile.getAveragePutSizeBytes()) ) );
-            elems.add(new StatElement<>( "Empty Blocks",
-                    Integer.valueOf(this.dataFile.getEmptyBlocks()) ) );
-        }
-
-        // get the stats from the super too
-        final IStats sStats = super.getStatistics();
-        elems.addAll(sStats.getStatElements());
-
-        stats.setStatElements( elems );
-
-        return stats;
-    }
-
-    /**
-     * This is used by the event logging.
-     * <p>
-     * @return the location of the disk, either path or ip.
-     */
-    @Override
-    protected String getDiskLocation()
-    {
-        return dataFile.getFilePath();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.block;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+
+import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
+import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache;
+import org.apache.commons.jcs3.engine.behavior.ICacheElement;
+import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
+import org.apache.commons.jcs3.engine.behavior.IRequireScheduler;
+import org.apache.commons.jcs3.engine.control.group.GroupAttrName;
+import org.apache.commons.jcs3.engine.control.group.GroupId;
+import org.apache.commons.jcs3.engine.stats.StatElement;
+import org.apache.commons.jcs3.engine.stats.Stats;
+import org.apache.commons.jcs3.engine.stats.behavior.IStatElement;
+import org.apache.commons.jcs3.engine.stats.behavior.IStats;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+import org.apache.commons.jcs3.utils.serialization.StandardSerializer;
+
+/**
+ * There is one BlockDiskCache per region. It manages the key and data store.
+ */
+public class BlockDiskCache<K, V>
+    extends AbstractDiskCache<K, V>
+    implements IRequireScheduler
+{
+    /** The logger. */
+    private static final Log log = LogManager.getLog( BlockDiskCache.class );
+
+    /** The name to prefix all log messages with. */
+    private final String logCacheName;
+
+    /** The name of the file to store data. */
+    private final String fileName;
+
+    /** The data access object */
+    private BlockDisk dataFile;
+
+    /** Attributes governing the behavior of the block disk cache. */
+    private final BlockDiskCacheAttributes blockDiskCacheAttributes;
+
+    /** The root directory for keys and data. */
+    private final File rootDirectory;
+
+    /** Store, loads, and persists the keys */
+    private BlockDiskKeyStore<K> keyStore;
+
+    /**
+     * Use this lock to synchronize reads and writes to the underlying storage mechanism. We don't
+     * need a reentrant lock, since we only lock one level.
+     */
+    private final ReentrantReadWriteLock storageLock = new ReentrantReadWriteLock();
+
+    private ScheduledFuture<?> future;
+
+    /**
+     * Constructs the BlockDisk after setting up the root directory.
+     * <p>
+     * @param cacheAttributes
+     */
+    public BlockDiskCache( final BlockDiskCacheAttributes cacheAttributes )
+    {
+        this( cacheAttributes, new StandardSerializer() );
+    }
+
+    /**
+     * Constructs the BlockDisk after setting up the root directory.
+     * <p>
+     * @param cacheAttributes
+     * @param elementSerializer used if supplied; the parent classes will not set a null serializer
+     */
+    public BlockDiskCache( final BlockDiskCacheAttributes cacheAttributes, final IElementSerializer elementSerializer )
+    {
+        super( cacheAttributes );
+        setElementSerializer( elementSerializer );
+
+        this.blockDiskCacheAttributes = cacheAttributes;
+        this.logCacheName = "Region [" + getCacheName() + "] ";
+
+        log.info("{0}: Constructing BlockDiskCache with attributes {1}", logCacheName, cacheAttributes );
+
+        // Make a clean file name
+        this.fileName = getCacheName().replaceAll("[^a-zA-Z0-9-_\\.]", "_");
+        this.rootDirectory = cacheAttributes.getDiskPath();
+
+        log.info("{0}: Cache file root directory: [{1}]", logCacheName, rootDirectory);
+
+        try
+        {
+            if ( this.blockDiskCacheAttributes.getBlockSizeBytes() > 0 )
+            {
+                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
+                                               this.blockDiskCacheAttributes.getBlockSizeBytes(),
+                                               getElementSerializer() );
+            }
+            else
+            {
+                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
+                                               getElementSerializer() );
+            }
+
+            keyStore = new BlockDiskKeyStore<>( this.blockDiskCacheAttributes, this );
+
+            final boolean alright = verifyDisk();
+
+            if ( keyStore.isEmpty() || !alright )
+            {
+                this.reset();
+            }
+
+            // Initialization finished successfully, so set alive to true.
+            setAlive(true);
+            log.info("{0}: Block Disk Cache is alive.", logCacheName);
+        }
+        catch ( final IOException e )
+        {
+            log.error("{0}: Failure initializing for fileName: {1} and root directory: {2}",
+                    logCacheName, fileName, rootDirectory, e);
+        }
+    }
+
+    /**
+     * @see org.apache.commons.jcs3.engine.behavior.IRequireScheduler#setScheduledExecutorService(java.util.concurrent.ScheduledExecutorService)
+     */
+    @Override
+    public void setScheduledExecutorService(final ScheduledExecutorService scheduledExecutor)
+    {
+        // add this region to the persistence thread.
+        // TODO we might need to stagger this a bit.
+        if ( this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds() > 0 )
+        {
+            future = scheduledExecutor.scheduleAtFixedRate(keyStore::saveKeys,
+                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
+                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
+                    TimeUnit.SECONDS);
+        }
+    }
+
+    /**
+     * We need to verify that the file on disk uses the same block size and that the file is the
+     * proper size.
+     * <p>
+     * @return true if it looks ok
+     */
+    protected boolean verifyDisk()
+    {
+        boolean alright = false;
+        // simply try to read a few. If it works, then the file is probably ok.
+        // TODO add more.
+
+        storageLock.readLock().lock();
+
+        try
+        {
+            this.keyStore.entrySet().stream()
+                .limit(100)
+                .forEach(entry -> {
+                    try
+                    {
+                        final Object data = this.dataFile.read(entry.getValue());
+                        if ( data == null )
+                        {
+                            throw new IOException("Data is null");
+                        }
+                    }
+                    catch (final IOException | ClassNotFoundException e)
+                    {
+                        throw new RuntimeException(logCacheName
+                                + " Couldn't find data for key [" + entry.getKey() + "]", e);
+                    }
+                });
+            alright = true;
+        }
+        catch ( final Exception e )
+        {
+            log.warn("{0}: Problem verifying disk.", logCacheName, e);
+            alright = false;
+        }
+        finally
+        {
+            storageLock.readLock().unlock();
+        }
+
+        return alright;
+    }
+
+    /**
+     * Return the keys in this cache.
+     * <p>
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#getKeySet()
+     */
+    @Override
+    public Set<K> getKeySet() throws IOException
+    {
+        final HashSet<K> keys = new HashSet<>();
+
+        storageLock.readLock().lock();
+
+        try
+        {
+            keys.addAll(this.keyStore.keySet());
+        }
+        finally
+        {
+            storageLock.readLock().unlock();
+        }
+
+        return keys;
+    }
+
+    /**
+     * Gets matching items from the cache.
+     * <p>
+     * @param pattern
+     * @return a map of K key to ICacheElement&lt;K, V&gt; element, or an empty map if there is no
+     *         data in the cache matching the pattern
+     */
+    @Override
+    public Map<K, ICacheElement<K, V>> processGetMatching( final String pattern )
+    {
+        Set<K> keyArray = null;
+        storageLock.readLock().lock();
+        try
+        {
+            keyArray = new HashSet<>(keyStore.keySet());
+        }
+        finally
+        {
+            storageLock.readLock().unlock();
+        }
+
+        final Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray( pattern, keyArray );
+
+        return matchingKeys.stream()
+            .map(this::processGet)
+            // filter nulls before collecting: Collectors.toMap rejects null values
+            .filter(element -> element != null)
+            .collect(Collectors.toMap(
+                    ICacheElement::getKey,
+                    element -> element));
+    }
+
+    /**
+     * Returns the number of keys.
+     * <p>
+     * (non-Javadoc)
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#getSize()
+     */
+    @Override
+    public int getSize()
+    {
+        return this.keyStore.size();
+    }
+
+    /**
+     * Gets the ICacheElement&lt;K, V&gt; for the key if it is in the cache. The program flow is as follows:
+     * <ol>
+     * <li>Make sure the disk cache is alive.</li> <li>Get a read lock.</li> <li>See if the key is
+     * in the key store.</li> <li>If we found a key, ask the BlockDisk for the object at the
+     * blocks.</li> <li>Release the lock.</li>
+     * </ol>
+     * @param key
+     * @return ICacheElement
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#get(Object)
+     */
+    @Override
+    protected ICacheElement<K, V> processGet( final K key )
+    {
+        if ( !isAlive() )
+        {
+            log.debug("{0}: No longer alive so returning null for key = {1}", logCacheName, key );
+            return null;
+        }
+
+        log.debug("{0}: Trying to get from disk: {1}", logCacheName, key );
+
+        ICacheElement<K, V> object = null;
+
+        try
+        {
+            storageLock.readLock().lock();
+            try {
+                final int[] ded = this.keyStore.get( key );
+                if ( ded != null )
+                {
+                    object = this.dataFile.read( ded );
+                }
+            } finally {
+                storageLock.readLock().unlock();
+            }
+
+        }
+        catch ( final IOException ioe )
+        {
+            log.error("{0}: Failure getting from disk--IOException, key = {1}", logCacheName, key, ioe );
+            reset();
+        }
+        catch ( final Exception e )
+        {
+            log.error("{0}: Failure getting from disk, key = {1}", logCacheName, key, e );
+        }
+        return object;
+    }
+
+    /**
+     * Writes an element to disk. The program flow is as follows:
+     * <ol>
+     * <li>Acquire write lock.</li> <li>See if an item exists for this key.</li> <li>If an item
+     * already exists, add its blocks to the remove list.</li> <li>Have the Block disk write the
+     * item.</li> <li>Create a descriptor and add it to the key map.</li> <li>Release the write
+     * lock.</li>
+     * </ol>
+     * @param element
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#update(ICacheElement)
+     */
+    @Override
+    protected void processUpdate( final ICacheElement<K, V> element )
+    {
+        if ( !isAlive() )
+        {
+            log.debug("{0}: No longer alive; aborting put of key = {1}",
+                    () -> logCacheName, element::getKey);
+            return;
+        }
+
+        int[] old = null;
+
+        // make sure this only locks for one particular cache region
+        storageLock.writeLock().lock();
+
+        try
+        {
+            old = this.keyStore.get( element.getKey() );
+
+            if ( old != null )
+            {
+                this.dataFile.freeBlocks( old );
+            }
+
+            final int[] blocks = this.dataFile.write( element );
+
+            this.keyStore.put( element.getKey(), blocks );
+
+            log.debug("{0}: Put to file [{1}] key [{2}]", () -> logCacheName,
+                    () -> fileName, element::getKey);
+        }
+        catch ( final IOException e )
+        {
+            log.error("{0}: Failure updating element, key: {1} old: {2}",
+                    logCacheName, element.getKey(), Arrays.toString(old), e);
+        }
+        finally
+        {
+            storageLock.writeLock().unlock();
+        }
+
+        log.debug("{0}: Storing element on disk, key: {1}", () -> logCacheName,
+                element::getKey);
+    }
+
+    /**
+     * Returns true if the removal was successful, or false if there was nothing to remove. The
+     * current implementation always results in a disk orphan.
+     * <p>
+     * @param key
+     * @return true if removed anything
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#remove(Object)
+     */
+    @Override
+    protected boolean processRemove( final K key )
+    {
+        if ( !isAlive() )
+        {
+            log.debug("{0}: No longer alive so returning false for key = {1}", logCacheName, key );
+            return false;
+        }
+
+        boolean reset = false;
+        boolean removed = false;
+
+        storageLock.writeLock().lock();
+
+        try
+        {
+            if (key instanceof String && key.toString().endsWith(NAME_COMPONENT_DELIMITER))
+            {
+                removed = performPartialKeyRemoval((String) key);
+            }
+            else if (key instanceof GroupAttrName && ((GroupAttrName<?>) key).attrName == null)
+            {
+                removed = performGroupRemoval(((GroupAttrName<?>) key).groupId);
+            }
+            else
+            {
+                removed = performSingleKeyRemoval(key);
+            }
+        }
+        catch ( final Exception e )
+        {
+            log.error("{0}: Problem removing element.", logCacheName, e );
+            reset = true;
+        }
+        finally
+        {
+            storageLock.writeLock().unlock();
+        }
+
+        if ( reset )
+        {
+            reset();
+        }
+
+        return removed;
+    }
+
+    /**
+     * Remove all elements from the group. This does not use the iterator to remove. It builds a
+     * list of group elements and then removes them one by one.
+     * <p>
+     * This operates under a lock obtained in doRemove().
+     * <p>
+     *
+     * @param key
+     * @return true if an element was removed
+     */
+    private boolean performGroupRemoval(final GroupId key)
+    {
+        // remove all keys of the same name group.
+        final List<K> itemsToRemove = keyStore.keySet()
+                .stream()
+                .filter(k -> k instanceof GroupAttrName && ((GroupAttrName<?>) k).groupId.equals(key))
+                .collect(Collectors.toList());
+
+        // remove matches.
+        // Don't add to recycle bin here
+        // https://issues.apache.org/jira/browse/JCS-67
+        itemsToRemove.forEach(this::performSingleKeyRemoval);
+        // TODO this needs to update the remove count separately
+
+        return !itemsToRemove.isEmpty();
+    }
+
+    /**
+     * Iterates over the keyset. Builds a list of matches. Removes all the keys in the list. Does
+     * not remove via the iterator, since the map impl may not support it.
+     * <p>
+     * This operates under a lock obtained in doRemove().
+     * <p>
+     *
+     * @param key
+     * @return true if there was a match
+     */
+    private boolean performPartialKeyRemoval(final String key)
+    {
+        // remove all keys of the same name hierarchy.
+        final List<K> itemsToRemove = keyStore.keySet()
+                .stream()
+                .filter(k -> k instanceof String && k.toString().startsWith(key))
+                .collect(Collectors.toList());
+
+        // remove matches.
+        // Don't add to recycle bin here
+        // https://issues.apache.org/jira/browse/JCS-67
+        itemsToRemove.forEach(this::performSingleKeyRemoval);
+        // TODO this needs to update the remove count separately
+
+        return !itemsToRemove.isEmpty();
+    }
+
+    private boolean performSingleKeyRemoval(final K key)
+    {
+        // remove single item.
+        final int[] ded = this.keyStore.remove( key );
+        final boolean removed = ded != null;
+        if ( removed )
+        {
+            this.dataFile.freeBlocks( ded );
+        }
+
+        log.debug("{0}: Disk removal: Removed from key hash, key [{1}] removed = {2}",
+                logCacheName, key, removed);
+        return removed;
+    }
+
+    /**
+     * Resets the keyfile, the disk file, and the memory key map.
+     * <p>
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#removeAll()
+     */
+    @Override
+    protected void processRemoveAll()
+    {
+        reset();
+    }
+
+    /**
+     * Dispose of the disk cache in a background thread. Joins against this thread to put a cap on
+     * the disposal time.
+     * <p>
+     * TODO make dispose window configurable.
+     */
+    @Override
+    public void processDispose()
+    {
+        final Thread t = new Thread(this::disposeInternal, "BlockDiskCache-DisposalThread" );
+        t.start();
+        // wait up to 60 seconds for dispose and then quit if not done.
+        try
+        {
+            t.join( 60 * 1000 );
+        }
+        catch ( final InterruptedException ex )
+        {
+            log.error("{0}: Interrupted while waiting for disposal thread to finish.",
+                    logCacheName, ex );
+        }
+    }
+
+    /**
+     * Internal method that handles the disposal.
+     */
+    protected void disposeInternal()
+    {
+        if ( !isAlive() )
+        {
+            log.error("{0}: Not alive and dispose was called, filename: {1}", logCacheName, fileName);
+            return;
+        }
+        storageLock.writeLock().lock();
+        try
+        {
+            // Prevents any interaction with the cache while we're shutting down.
+            setAlive(false);
+            this.keyStore.saveKeys();
+
+            if (future != null)
+            {
+                future.cancel(true);
+            }
+
+            try
+            {
+                log.debug("{0}: Closing files, base filename: {1}", logCacheName, fileName );
+                dataFile.close();
+                // dataFile = null;
+            }
+            catch ( final IOException e )
+            {
+                log.error("{0}: Failure closing files in dispose, filename: {1}",
+                        logCacheName, fileName, e );
+            }
+        }
+        finally
+        {
+            storageLock.writeLock().unlock();
+        }
+
+        log.info("{0}: Shutdown complete.", logCacheName);
+    }
+
+    /**
+     * Returns the attributes.
+     * <p>
+     * @see org.apache.commons.jcs3.auxiliary.AuxiliaryCache#getAuxiliaryCacheAttributes()
+     */
+    @Override
+    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
+    {
+        return this.blockDiskCacheAttributes;
+    }
+
+    /**
+     * Reset effectively clears the disk cache, creating new files, recycle bins, and keymaps.
+     * <p>
+     * It can be used as a last resort to handle errors, to force a content update, or to remove all items.
+     */
+    private void reset()
+    {
+        log.info("{0}: Resetting cache", logCacheName);
+
+        try
+        {
+            storageLock.writeLock().lock();
+
+            this.keyStore.reset();
+
+            if ( dataFile != null )
+            {
+                dataFile.reset();
+            }
+        }
+        catch ( final IOException e )
+        {
+            log.error("{0}: Failure resetting state", logCacheName, e );
+        }
+        finally
+        {
+            storageLock.writeLock().unlock();
+        }
+    }
+
+    /**
+     * Add these blocks to the emptyBlock list.
+     * <p>
+     * @param blocksToFree
+     */
+    protected void freeBlocks( final int[] blocksToFree )
+    {
+        this.dataFile.freeBlocks( blocksToFree );
+    }
+
+    /**
+     * Returns info about the disk cache.
+     * <p>
+     * @see org.apache.commons.jcs3.auxiliary.AuxiliaryCache#getStatistics()
+     */
+    @Override
+    public IStats getStatistics()
+    {
+        final IStats stats = new Stats();
+        stats.setTypeName( "Block Disk Cache" );
+
+        final ArrayList<IStatElement<?>> elems = new ArrayList<>();
+
+        elems.add(new StatElement<>( "Is Alive", Boolean.valueOf(isAlive()) ) );
+        elems.add(new StatElement<>( "Key Map Size", Integer.valueOf(this.keyStore.size()) ) );
+
+        if (this.dataFile != null)
+        {
+            try
+            {
+                elems.add(new StatElement<>( "Data File Length", Long.valueOf(this.dataFile.length()) ) );
+            }
+            catch ( final IOException e )
+            {
+                log.error( e );
+            }
+
+            elems.add(new StatElement<>( "Block Size Bytes",
+                    Integer.valueOf(this.dataFile.getBlockSizeBytes()) ) );
+            elems.add(new StatElement<>( "Number Of Blocks",
+                    Integer.valueOf(this.dataFile.getNumberOfBlocks()) ) );
+            elems.add(new StatElement<>( "Average Put Size Bytes",
+                    Long.valueOf(this.dataFile.getAveragePutSizeBytes()) ) );
+            elems.add(new StatElement<>( "Empty Blocks",
+                    Integer.valueOf(this.dataFile.getEmptyBlocks()) ) );
+        }
+
+        // get the stats from the super too
+        final IStats sStats = super.getStatistics();
+        elems.addAll(sStats.getStatElements());
+
+        stats.setStatElements( elems );
+
+        return stats;
+    }
+
+    /**
+     * This is used by the event logging.
+     * <p>
+     * @return the location of the disk, either path or ip.
+     */
+    @Override
+    protected String getDiskLocation()
+    {
+        return dataFile.getFilePath();
+    }
+}
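
A minimal stand-alone sketch of how the class above is exercised, assuming the
inherited setCacheName/setDiskPath setters and the CacheElement implementation
of ICacheElement (neither appears in this patch); in normal use a factory
builds the cache from configuration rather than by hand:

    import java.io.IOException;

    import org.apache.commons.jcs3.auxiliary.disk.block.BlockDiskCache;
    import org.apache.commons.jcs3.auxiliary.disk.block.BlockDiskCacheAttributes;
    import org.apache.commons.jcs3.engine.CacheElement;
    import org.apache.commons.jcs3.engine.behavior.ICacheElement;

    public class BlockDiskCacheSketch
    {
        public static void main(final String[] args) throws IOException
        {
            // Illustration only; the attribute values are assumptions, not defaults.
            final BlockDiskCacheAttributes attrs = new BlockDiskCacheAttributes();
            attrs.setCacheName("testRegion");       // log prefix; sanitized into the file name
            attrs.setDiskPath("target/block-disk"); // root directory for the .data and .key files
            attrs.setBlockSizeBytes(4 * 1024);      // > 0 selects the fixed-block-size BlockDisk
            attrs.setMaxKeySize(10000);             // cap on keys held in the in-memory key store

            final BlockDiskCache<String, String> cache = new BlockDiskCache<>(attrs);
            cache.update(new CacheElement<>("testRegion", "key1", "value1"));
            final ICacheElement<String, String> element = cache.get("key1");
            System.out.println(element);
            cache.dispose(); // saves the keys and closes the data file, capped at 60 seconds
        }
    }
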
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCacheAttributes.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCacheAttributes.java
index 310db3bf..f2629ae8 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCacheAttributes.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskCacheAttributes.java
@@ -1,118 +1,116 @@
-package org.apache.commons.jcs3.auxiliary.disk.block;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCacheAttributes;
-
-/**
- * This holds attributes for Block Disk Cache configuration.
- * <p>
- * @author Aaron Smuts
- */
-public class BlockDiskCacheAttributes
-    extends AbstractDiskCacheAttributes
-{
-    /** Don't change */
-    private static final long serialVersionUID = 6568840097657265989L;
-
-    /** The size per block in bytes. */
-    private int blockSizeBytes;
-
-    /** Maximum number of keys to be kept in memory */
-    private static final int DEFAULT_MAX_KEY_SIZE = 5000;
-
-    /** -1 means no limit. */
-    private int maxKeySize = DEFAULT_MAX_KEY_SIZE;
-
-    /** How often should we persist the keys. */
-    private static final long DEFAULT_KEY_PERSISTENCE_INTERVAL_SECONDS = 5 * 60;
-
-    /** The keys will be persisted at this interval.  -1 means never. */
-    private long keyPersistenceIntervalSeconds = DEFAULT_KEY_PERSISTENCE_INTERVAL_SECONDS;
-
-    /**
-     * The size of the blocks. All blocks are the same size.
-     * <p>
-     * @param blockSizeBytes The blockSizeBytes to set.
-     */
-    public void setBlockSizeBytes( final int blockSizeBytes )
-    {
-        this.blockSizeBytes = blockSizeBytes;
-    }
-
-    /**
-     * @return Returns the blockSizeBytes.
-     */
-    public int getBlockSizeBytes()
-    {
-        return blockSizeBytes;
-    }
-
-    /**
-     * @param maxKeySize The maxKeySize to set.
-     */
-    public void setMaxKeySize( final int maxKeySize )
-    {
-        this.maxKeySize = maxKeySize;
-    }
-
-    /**
-     * @return Returns the maxKeySize.
-     */
-    public int getMaxKeySize()
-    {
-        return maxKeySize;
-    }
-
-    /**
-     * @param keyPersistenceIntervalSeconds The keyPersistenceIntervalSeconds to set.
-     */
-    public void setKeyPersistenceIntervalSeconds( final long keyPersistenceIntervalSeconds )
-    {
-        this.keyPersistenceIntervalSeconds = keyPersistenceIntervalSeconds;
-    }
-
-    /**
-     * @return Returns the keyPersistenceIntervalSeconds.
-     */
-    public long getKeyPersistenceIntervalSeconds()
-    {
-        return keyPersistenceIntervalSeconds;
-    }
-
-    /**
-     * Write out the values for debugging purposes.
-     * <p>
-     * @return String
-     */
-    @Override
-    public String toString()
-    {
-        final StringBuilder str = new StringBuilder();
-        str.append( "\nBlockDiskAttributes " );
-        str.append( "\n DiskPath [" + this.getDiskPath() + "]" );
-        str.append( "\n MaxKeySize [" + this.getMaxKeySize() + "]" );
-        str.append( "\n MaxPurgatorySize [" + this.getMaxPurgatorySize() + "]" );
-        str.append( "\n BlockSizeBytes [" + this.getBlockSizeBytes() + "]" );
-        str.append( "\n KeyPersistenceIntervalSeconds [" + this.getKeyPersistenceIntervalSeconds() + "]" );
-        str.append( "\n DiskLimitType [" + this.getDiskLimitType() + "]" );
-        return str.toString();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.block;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCacheAttributes;
+
+/**
+ * This holds attributes for Block Disk Cache configuration.
+ */
+public class BlockDiskCacheAttributes
+    extends AbstractDiskCacheAttributes
+{
+    /** Don't change */
+    private static final long serialVersionUID = 6568840097657265989L;
+
+    /** The size per block in bytes. */
+    private int blockSizeBytes;
+
+    /** Maximum number of keys to be kept in memory */
+    private static final int DEFAULT_MAX_KEY_SIZE = 5000;
+
+    /** -1 means no limit. */
+    private int maxKeySize = DEFAULT_MAX_KEY_SIZE;
+
+    /** How often should we persist the keys. */
+    private static final long DEFAULT_KEY_PERSISTENCE_INTERVAL_SECONDS = 5 * 60;
+
+    /** The keys will be persisted at this interval.  -1 means never. */
+    private long keyPersistenceIntervalSeconds = DEFAULT_KEY_PERSISTENCE_INTERVAL_SECONDS;
+
+    /**
+     * The size of the blocks. All blocks are the same size.
+     * <p>
+     * @param blockSizeBytes The blockSizeBytes to set.
+     */
+    public void setBlockSizeBytes( final int blockSizeBytes )
+    {
+        this.blockSizeBytes = blockSizeBytes;
+    }
+
+    /**
+     * @return Returns the blockSizeBytes.
+     */
+    public int getBlockSizeBytes()
+    {
+        return blockSizeBytes;
+    }
+
+    /**
+     * @param maxKeySize The maxKeySize to set.
+     */
+    public void setMaxKeySize( final int maxKeySize )
+    {
+        this.maxKeySize = maxKeySize;
+    }
+
+    /**
+     * @return Returns the maxKeySize.
+     */
+    public int getMaxKeySize()
+    {
+        return maxKeySize;
+    }
+
+    /**
+     * @param keyPersistenceIntervalSeconds The keyPersistenceIntervalSeconds to set.
+     */
+    public void setKeyPersistenceIntervalSeconds( final long keyPersistenceIntervalSeconds )
+    {
+        this.keyPersistenceIntervalSeconds = keyPersistenceIntervalSeconds;
+    }
+
+    /**
+     * @return Returns the keyPersistenceIntervalSeconds.
+     */
+    public long getKeyPersistenceIntervalSeconds()
+    {
+        return keyPersistenceIntervalSeconds;
+    }
+
+    /**
+     * Write out the values for debugging purposes.
+     * <p>
+     * @return String
+     */
+    @Override
+    public String toString()
+    {
+        final StringBuilder str = new StringBuilder();
+        str.append( "\nBlockDiskAttributes " );
+        str.append( "\n DiskPath [" + this.getDiskPath() + "]" );
+        str.append( "\n MaxKeySize [" + this.getMaxKeySize() + "]" );
+        str.append( "\n MaxPurgatorySize [" + this.getMaxPurgatorySize() + "]" );
+        str.append( "\n BlockSizeBytes [" + this.getBlockSizeBytes() + "]" );
+        str.append( "\n KeyPersistenceIntervalSeconds [" + this.getKeyPersistenceIntervalSeconds() + "]" );
+        str.append( "\n DiskLimitType [" + this.getDiskLimitType() + "]" );
+        return str.toString();
+    }
+}
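
These attributes are normally populated from the cache configuration file
rather than set in code. The snippet below sketches how the setters above map
to property keys, assuming the usual JCS convention that property names match
the bean setters; the factory class named here does not appear in this patch:

    # Hypothetical cache.ccf excerpt, for illustration only.
    jcs.auxiliary.blockDC=org.apache.commons.jcs3.auxiliary.disk.block.BlockDiskCacheFactory
    jcs.auxiliary.blockDC.attributes=org.apache.commons.jcs3.auxiliary.disk.block.BlockDiskCacheAttributes
    jcs.auxiliary.blockDC.attributes.DiskPath=target/block-disk
    jcs.auxiliary.blockDC.attributes.BlockSizeBytes=4096
    jcs.auxiliary.blockDC.attributes.MaxKeySize=10000
    jcs.auxiliary.blockDC.attributes.KeyPersistenceIntervalSeconds=300
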
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskElementDescriptor.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskElementDescriptor.java
index d515eebb..f086cb5c 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskElementDescriptor.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskElementDescriptor.java
@@ -1,153 +1,151 @@
-package org.apache.commons.jcs3.auxiliary.disk.block;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.io.Serializable;
-import java.util.Arrays;
-
-/**
- * This represents an element on disk. This is used when we persist the keys. We only store the
- * block addresses in memory. We don't need the length here, since all the blocks are the same size.
- * <p>
- * @author Aaron Smuts
- */
-public class BlockDiskElementDescriptor<K>
-    implements Serializable, Externalizable
-{
-    /** Don't change */
-    private static final long serialVersionUID = -1400659301208101411L;
-
-    /** The key */
-    private K key;
-
-    /** The array of block numbers */
-    private int[] blocks;
-
-    /**
-     * Default constructor
-     */
-    public BlockDiskElementDescriptor()
-    {
-        super();
-    }
-
-    /**
-     * Constructor
-     *
-     * @param key the key
-     * @param blocks the data
-     *
-     * @since 3.1
-     */
-    public BlockDiskElementDescriptor(K key, int[] blocks)
-    {
-        super();
-        this.key = key;
-        this.blocks = blocks;
-    }
-
-    /**
-     * @param key The key to set.
-     */
-    public void setKey( final K key )
-    {
-        this.key = key;
-    }
-
-    /**
-     * @return Returns the key.
-     */
-    public K getKey()
-    {
-        return key;
-    }
-
-    /**
-     * @param blocks The blocks to set.
-     */
-    public void setBlocks( final int[] blocks )
-    {
-        this.blocks = blocks;
-    }
-
-    /**
-     * This holds the block numbers. An item may be dispersed across multiple blocks.
-     * <p>
-     * @return Returns the blocks.
-     */
-    public int[] getBlocks()
-    {
-        return blocks;
-    }
-
-    /**
-     * For debugging.
-     * <p>
-     * @return Info on the descriptor.
-     */
-    @Override
-    public String toString()
-    {
-        final StringBuilder buf = new StringBuilder();
-        buf.append( "\nBlockDiskElementDescriptor" );
-        buf.append( "\n key [" + this.getKey() + "]" );
-        buf.append( "\n blocks [" );
-        if ( this.getBlocks() != null )
-        {
-            Arrays.stream(this.getBlocks()).forEach(buf::append);
-        }
-        buf.append( "]" );
-        return buf.toString();
-    }
-
-    /**
-     * Saves on reflection.
-     * <p>
-     * (non-Javadoc)
-     * @see java.io.Externalizable#readExternal(java.io.ObjectInput)
-     */
-    @Override
-    @SuppressWarnings("unchecked") // Need cast to K
-    public void readExternal( final ObjectInput input )
-        throws IOException, ClassNotFoundException
-    {
-        this.key = (K) input.readObject();
-        this.blocks = (int[]) input.readObject();
-    }
-
-    /**
-     * Saves on reflection.
-     * <p>
-     * (non-Javadoc)
-     * @see java.io.Externalizable#writeExternal(java.io.ObjectOutput)
-     */
-    @Override
-    public void writeExternal( final ObjectOutput output )
-        throws IOException
-    {
-        output.writeObject( this.key );
-        output.writeObject( this.blocks );
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.block;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.Serializable;
+import java.util.Arrays;
+
+/**
+ * This represents an element on disk. This is used when we persist the keys. We only store the
+ * block addresses in memory. We don't need the length here, since all the blocks are the same size.
+ */
+public class BlockDiskElementDescriptor<K>
+    implements Serializable, Externalizable
+{
+    /** Don't change */
+    private static final long serialVersionUID = -1400659301208101411L;
+
+    /** The key */
+    private K key;
+
+    /** The array of block numbers */
+    private int[] blocks;
+
+    /**
+     * Default constructor
+     */
+    public BlockDiskElementDescriptor()
+    {
+        super();
+    }
+
+    /**
+     * Constructor
+     *
+     * @param key the key
+     * @param blocks the data
+     *
+     * @since 3.1
+     */
+    public BlockDiskElementDescriptor(K key, int[] blocks)
+    {
+        super();
+        this.key = key;
+        this.blocks = blocks;
+    }
+
+    /**
+     * @param key The key to set.
+     */
+    public void setKey( final K key )
+    {
+        this.key = key;
+    }
+
+    /**
+     * @return Returns the key.
+     */
+    public K getKey()
+    {
+        return key;
+    }
+
+    /**
+     * @param blocks The blocks to set.
+     */
+    public void setBlocks( final int[] blocks )
+    {
+        this.blocks = blocks;
+    }
+
+    /**
+     * This holds the block numbers. An item may be dispersed across multiple blocks.
+     * <p>
+     * @return Returns the blocks.
+     */
+    public int[] getBlocks()
+    {
+        return blocks;
+    }
+
+    /**
+     * For debugging.
+     * <p>
+     * @return Info on the descriptor.
+     */
+    @Override
+    public String toString()
+    {
+        final StringBuilder buf = new StringBuilder();
+        buf.append( "\nBlockDiskElementDescriptor" );
+        buf.append( "\n key [" + this.getKey() + "]" );
+        buf.append( "\n blocks [" );
+        if ( this.getBlocks() != null )
+        {
+            Arrays.stream(this.getBlocks()).forEach(buf::append);
+        }
+        buf.append( "]" );
+        return buf.toString();
+    }
+
+    /**
+     * Saves on reflection.
+     * <p>
+     * (non-Javadoc)
+     * @see java.io.Externalizable#readExternal(java.io.ObjectInput)
+     */
+    @Override
+    @SuppressWarnings("unchecked") // Need cast to K
+    public void readExternal( final ObjectInput input )
+        throws IOException, ClassNotFoundException
+    {
+        this.key = (K) input.readObject();
+        this.blocks = (int[]) input.readObject();
+    }
+
+    /**
+     * Saves on reflection.
+     * <p>
+     * (non-Javadoc)
+     * @see java.io.Externalizable#writeExternal(java.io.ObjectOutput)
+     */
+    @Override
+    public void writeExternal( final ObjectOutput output )
+        throws IOException
+    {
+        output.writeObject( this.key );
+        output.writeObject( this.blocks );
+    }
+}
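
Because the descriptor implements Externalizable, the standard object streams
call writeExternal/readExternal instead of reflecting over the fields, which
is what the "Saves on reflection" comments refer to. A minimal round-trip
sketch using only java.io (an illustration, not part of this patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    import org.apache.commons.jcs3.auxiliary.disk.block.BlockDiskElementDescriptor;

    public class DescriptorRoundTripSketch
    {
        public static void main(final String[] args) throws Exception
        {
            final BlockDiskElementDescriptor<String> ded =
                new BlockDiskElementDescriptor<>("key1", new int[] { 3, 7 });

            final ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (ObjectOutputStream oos = new ObjectOutputStream(bos))
            {
                oos.writeObject(ded); // delegates to writeExternal
            }

            try (ObjectInputStream ois =
                new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())))
            {
                @SuppressWarnings("unchecked")
                final BlockDiskElementDescriptor<String> copy =
                    (BlockDiskElementDescriptor<String>) ois.readObject(); // delegates to readExternal
                // toString appends block numbers without separators: key [key1], blocks [37]
                System.out.println(copy);
            }
        }
    }
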
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java
index 7d310a63..07beae75 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/block/BlockDiskKeyStore.java
@@ -1,607 +1,604 @@
-package org.apache.commons.jcs3.auxiliary.disk.block;
-
-import java.io.EOFException;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.ObjectInputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.file.Files;
-import java.nio.file.StandardOpenOption;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.jcs3.auxiliary.disk.behavior.IDiskCacheAttributes.DiskLimitType;
-import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
-import org.apache.commons.jcs3.io.ObjectInputStreamClassLoaderAware;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-import org.apache.commons.jcs3.utils.serialization.StandardSerializer;
-import org.apache.commons.jcs3.utils.struct.AbstractLRUMap;
-import org.apache.commons.jcs3.utils.struct.LRUMap;
-import org.apache.commons.jcs3.utils.timing.ElapsedTimer;
-
-/**
- * This is responsible for storing the keys.
- * <p>
- *
- * @author Aaron Smuts
- */
-public class BlockDiskKeyStore<K>
-{
-    /**
-     * Class for recycling and lru. This implements the LRU overflow callback,
-     * so we can mark the blocks as free.
-     */
-    public class LRUMapCountLimited extends LRUMap<K, int[]>
-    {
-        /**
-         * <code>tag</code> tells us which map we are working on.
-         */
-        public final static String TAG = "orig-lru-count";
-
-        public LRUMapCountLimited(final int maxKeySize)
-        {
-            super(maxKeySize);
-        }
-
-        /**
-         * This is called when the max key size is reached. The least recently
-         * used item will be passed here, and its blocks will be added to the
-         * empty block list so they can be reused.
-         * <p>
-         *
-         * @param key
-         * @param value
-         */
-        @Override
-        protected void processRemovedLRU(final K key, final int[] value)
-        {
-            blockDiskCache.freeBlocks(value);
-            if (log.isDebugEnabled())
-            {
-                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
-                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
-            }
-        }
-    }
-
-    /**
-     * Class for recycling and lru. This implements the LRU size overflow
-     * callback, so we can mark the blocks as free.
-     */
-    public class LRUMapSizeLimited extends AbstractLRUMap<K, int[]>
-    {
-        /**
-         * <code>tag</code> tells us which map we are working on.
-         */
-        public final static String TAG = "orig-lru-size";
-
-        // size of the content in kB
-        private final AtomicInteger contentSize;
-        private final int maxSize;
-
-        /**
-         * Default
-         */
-        public LRUMapSizeLimited()
-        {
-            this(-1);
-        }
-
-        /**
-         * @param maxSize
-         *            maximum cache size in kB
-         */
-        public LRUMapSizeLimited(final int maxSize)
-        {
-            this.maxSize = maxSize;
-            this.contentSize = new AtomicInteger(0);
-        }
-
-        // keep the content size in kB, so 2^31 kB is a reasonable value
-        private void addLengthToCacheSize(final int[] value)
-        {
-            contentSize.addAndGet(value.length * blockSize / 1024 + 1);
-        }
-
-        /**
-         * This is called when the max key size is reached. The least recently
-         * used item will be passed here, and its blocks will be added to the
-         * empty block list so they can be reused.
-         * <p>
-         *
-         * @param key
-         * @param value
-         */
-        @Override
-        protected void processRemovedLRU(final K key, final int[] value)
-        {
-            blockDiskCache.freeBlocks(value);
-            if (log.isDebugEnabled())
-            {
-                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
-                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
-            }
-
-            if (value != null)
-            {
-                subLengthFromCacheSize(value);
-            }
-        }
-
-        @Override
-        public int[] put(final K key, final int[] value)
-        {
-            int[] oldValue = null;
-
-            try
-            {
-                oldValue = super.put(key, value);
-            }
-            finally
-            {
-                if (value != null)
-                {
-                    addLengthToCacheSize(value);
-                }
-                if (oldValue != null)
-                {
-                    subLengthFromCacheSize(oldValue);
-                }
-            }
-
-            return oldValue;
-        }
-
-        @Override
-        public int[] remove(final Object key)
-        {
-            int[] value = null;
-
-            try
-            {
-                value = super.remove(key);
-                return value;
-            }
-            finally
-            {
-                if (value != null)
-                {
-                    subLengthFromCacheSize(value);
-                }
-            }
-        }
-
-        @Override
-        protected boolean shouldRemove()
-        {
-            return maxSize > 0 && contentSize.get() > maxSize && this.size() > 1;
-        }
-
-        // keep the content size in kB, so 2^31 kB is reasonable value
-        private void subLengthFromCacheSize(final int[] value)
-        {
-            contentSize.addAndGet(value.length * blockSize / -1024 - 1);
-        }
-    }
-
-    /** The logger */
-    private static final Log log = LogManager.getLog(BlockDiskKeyStore.class);
-
-    /** Attributes governing the behavior of the block disk cache. */
-    private final BlockDiskCacheAttributes blockDiskCacheAttributes;
-
-    /** The key to block map */
-    private Map<K, int[]> keyHash;
-
-    /** The file where we persist the keys */
-    private final File keyFile;
-
-    /** The key file signature for new-style key files */
-    private final static int KEY_FILE_SIGNATURE = 0x6A63734B; // "jcsK"
-
-    /** The name to prefix log messages with. */
-    protected final String logCacheName;
-
-    /** Name of the file where we persist the keys */
-    private final String fileName;
-
-    /** The maximum number of keys to store in memory */
-    private final int maxKeySize;
-
-    /**
-     * we need this so we can communicate free blocks to the data store when
-     * keys fall off the LRU
-     */
-    protected final BlockDiskCache<K, ?> blockDiskCache;
-
-    private DiskLimitType diskLimitType = DiskLimitType.COUNT;
-
-    private final int blockSize;
-
-    /**
-     * Serializer for reading and writing key file
-     */
-    private final IElementSerializer serializer;
-
-    /**
-     * Set the configuration options.
-     * <p>
-     *
-     * @param cacheAttributes
-     * @param blockDiskCache
-     *            used for freeing
-     */
-    public BlockDiskKeyStore(final BlockDiskCacheAttributes cacheAttributes, final BlockDiskCache<K, ?> blockDiskCache)
-    {
-        this.blockDiskCacheAttributes = cacheAttributes;
-        this.logCacheName = "Region [" + this.blockDiskCacheAttributes.getCacheName() + "] ";
-        this.fileName = this.blockDiskCacheAttributes.getCacheName();
-        this.maxKeySize = cacheAttributes.getMaxKeySize();
-        this.blockDiskCache = blockDiskCache;
-        this.diskLimitType = cacheAttributes.getDiskLimitType();
-        this.blockSize = cacheAttributes.getBlockSizeBytes();
-
-        if (blockDiskCache == null)
-        {
-            this.serializer = new StandardSerializer();
-        }
-        else
-        {
-            this.serializer = blockDiskCache.getElementSerializer();
-        }
-
-        final File rootDirectory = cacheAttributes.getDiskPath();
-
-        log.info("{0}: Cache file root directory [{1}]", logCacheName, rootDirectory);
-
-        this.keyFile = new File(rootDirectory, fileName + ".key");
-
-        log.info("{0}: Key File [{1}]", logCacheName, this.keyFile.getAbsolutePath());
-
-        if (keyFile.length() > 0)
-        {
-            loadKeys();
-            if (!verify())
-            {
-                log.warn("{0}: Key File is invalid. Resetting file.", logCacheName);
-                initKeyMap();
-                reset();
-            }
-        }
-        else
-        {
-            initKeyMap();
-        }
-    }
-
-    /**
-     * This is mainly used for testing. It leave the disk in tact, and just
-     * clears memory.
-     */
-    protected void clearMemoryMap()
-    {
-        this.keyHash.clear();
-    }
-
-    /**
-     * Gets the entry set.
-     * <p>
-     *
-     * @return entry set.
-     */
-    public Set<Map.Entry<K, int[]>> entrySet()
-    {
-        return this.keyHash.entrySet();
-    }
-
-    /**
-     * gets the object for the key.
-     * <p>
-     *
-     * @param key
-     * @return Object
-     */
-    public int[] get(final K key)
-    {
-        return this.keyHash.get(key);
-    }
-
-    /**
-     * Create the map for keys that contain the index position on disk.
-     */
-    private void initKeyMap()
-    {
-        keyHash = null;
-        if (maxKeySize >= 0)
-        {
-            if (this.diskLimitType == DiskLimitType.SIZE)
-            {
-                keyHash = new LRUMapSizeLimited(maxKeySize);
-            }
-            else
-            {
-                keyHash = new LRUMapCountLimited(maxKeySize);
-            }
-            log.info("{0}: Set maxKeySize to: \"{1}\"", logCacheName, maxKeySize);
-        }
-        else
-        {
-            // If no max size, use a plain map for memory and processing
-            // efficiency.
-            keyHash = new HashMap<>();
-            // keyHash = Collections.synchronizedMap( new HashMap() );
-            log.info("{0}: Set maxKeySize to unlimited", logCacheName);
-        }
-    }
-
-    /**
-     * Tests emptiness (size == 0).
-     *
-     * @return Whether or not this is empty.
-     * @since 3.1
-     */
-    public boolean isEmpty()
-    {
-        return size() == 0;
-    }
-
-    /**
-     * Gets the key set.
-     * <p>
-     *
-     * @return key set.
-     */
-    public Set<K> keySet()
-    {
-        return this.keyHash.keySet();
-    }
-
-    /**
-     * Loads the keys from the .key file. The keys are stored individually on
-     * disk. They are added one by one to an LRUMap.
-     */
-    protected void loadKeys()
-    {
-        log.info("{0}: Loading keys for {1}", () -> logCacheName, keyFile::toString);
-
-        // create a key map to use.
-        initKeyMap();
-
-        final HashMap<K, int[]> keys = new HashMap<>();
-
-        synchronized (keyFile)
-        {
-            // Check file type
-            int fileSignature = 0;
-
-            try (FileChannel bc = FileChannel.open(keyFile.toPath(), StandardOpenOption.READ))
-            {
-                final ByteBuffer signature = ByteBuffer.allocate(4);
-                bc.read(signature);
-                signature.flip();
-                fileSignature = signature.getInt();
-
-                if (fileSignature == KEY_FILE_SIGNATURE)
-                {
-                    while (true)
-                    {
-                        try
-                        {
-                            final BlockDiskElementDescriptor<K> descriptor =
-                                    serializer.deSerializeFrom(bc, null);
-                            if (descriptor != null)
-                            {
-                                keys.put(descriptor.getKey(), descriptor.getBlocks());
-                            }
-                        }
-                        catch (EOFException e)
-                        {
-                            break;
-                        }
-                    }
-                }
-            }
-            catch (final IOException | ClassNotFoundException e)
-            {
-                log.error("{0}: Problem loading keys for file {1}", logCacheName, fileName, e);
-            }
-
-            if (fileSignature != KEY_FILE_SIGNATURE)
-            {
-                try (InputStream fis = Files.newInputStream(keyFile.toPath());
-                     ObjectInputStream ois = new ObjectInputStreamClassLoaderAware(fis, null))
-                {
-                    while (true)
-                    {
-                        @SuppressWarnings("unchecked")
-                        final
-                        // Need to cast from Object
-                        BlockDiskElementDescriptor<K> descriptor = (BlockDiskElementDescriptor<K>) ois.readObject();
-                        if (descriptor != null)
-                        {
-                            keys.put(descriptor.getKey(), descriptor.getBlocks());
-                        }
-                    }
-                }
-                catch (final EOFException eof)
-                {
-                    // nothing
-                }
-                catch (final IOException | ClassNotFoundException e)
-                {
-                    log.error("{0}: Problem loading keys (old style) for file {1}", logCacheName, fileName, e);
-                }
-            }
-        }
-
-        if (!keys.isEmpty())
-        {
-            keyHash.putAll(keys);
-
-            log.debug("{0}: Found {1} in keys file.", () -> logCacheName, keys::size);
-            log.info("{0}: Loaded keys from [{1}], key count: {2}; up to {3} will be available.",
-                    () -> logCacheName, () -> fileName, this::size,
-                    () -> maxKeySize);
-        }
-    }
-
-    /**
-     * Puts a int[] in the keyStore.
-     * <p>
-     *
-     * @param key
-     * @param value
-     */
-    public void put(final K key, final int[] value)
-    {
-        this.keyHash.put(key, value);
-    }
-
-    /**
-     * Remove by key.
-     * <p>
-     *
-     * @param key
-     * @return BlockDiskElementDescriptor if it was present, else null
-     */
-    public int[] remove(final K key)
-    {
-        return this.keyHash.remove(key);
-    }
-
-    /**
-     * Resets the file and creates a new key map.
-     */
-    protected void reset()
-    {
-        synchronized (keyFile)
-        {
-            clearMemoryMap();
-            saveKeys();
-        }
-    }
-
-    /**
-     * Saves key file to disk. This gets the LRUMap entry set and write the
-     * entries out one by one after putting them in a wrapper.
-     */
-    protected void saveKeys()
-    {
-        final ElapsedTimer timer = new ElapsedTimer();
-        log.info("{0}: Saving keys to [{1}], key count [{2}]", () -> logCacheName,
-                this.keyFile::getAbsolutePath, this::size);
-
-        synchronized (keyFile)
-        {
-            try (FileChannel bc = FileChannel.open(keyFile.toPath(),
-                    StandardOpenOption.CREATE,
-                    StandardOpenOption.WRITE,
-                    StandardOpenOption.TRUNCATE_EXISTING))
-            {
-                if (!verify())
-                {
-                    throw new IOException("Inconsistent key file");
-                }
-
-                // Write signature to distinguish old format from new one
-                ByteBuffer signature = ByteBuffer.allocate(4);
-                signature.putInt(KEY_FILE_SIGNATURE).flip();
-                bc.write(signature);
-
-                // don't need to synchronize, since the underlying
-                // collection makes a copy
-                for (final Map.Entry<K, int[]> entry : keyHash.entrySet())
-                {
-                    final BlockDiskElementDescriptor<K> descriptor =
-                            new BlockDiskElementDescriptor<>(entry.getKey(),entry.getValue());
-                    // stream these out in the loop.
-                    serializer.serializeTo(descriptor, bc);
-                }
-            }
-            catch (final IOException e)
-            {
-                log.error("{0}: Problem storing keys.", logCacheName, e);
-            }
-        }
-
-        log.info("{0}: Finished saving keys. It took {1} to store {2} keys. Key file length [{3}]",
-                () -> logCacheName, timer::getElapsedTimeString, this::size,
-                keyFile::length);
-    }
-
-    /**
-     * Gets the size of the key hash.
-     * <p>
-     *
-     * @return the number of keys.
-     */
-    public int size()
-    {
-        return this.keyHash.size();
-    }
-
-    /**
-     * Verify key store integrity
-     *
-     * @return true if key store is valid
-     */
-    private boolean verify()
-    {
-        final Map<Integer, Set<K>> blockAllocationMap = new TreeMap<>();
-        for (final Entry<K, int[]> e : keyHash.entrySet())
-        {
-            for (final int block : e.getValue())
-            {
-                Set<K> keys = blockAllocationMap.computeIfAbsent(block, s -> new HashSet<>());
-                if (!keys.isEmpty() && !log.isTraceEnabled())
-                {
-                    // keys are not null, and no debug - fail fast
-                    return false;
-                }
-                keys.add(e.getKey());
-            }
-        }
-        if (!log.isTraceEnabled())
-        {
-            return true;
-        }
-        boolean ok = true;
-        for (final Entry<Integer, Set<K>> e : blockAllocationMap.entrySet())
-        {
-            log.trace("Block {0}: {1}", e::getKey, e::getValue);
-            if (e.getValue().size() > 1)
-            {
-                ok = false;
-            }
-        }
-        return ok;
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.block;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.EOFException;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.commons.jcs3.auxiliary.disk.behavior.IDiskCacheAttributes.DiskLimitType;
+import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
+import org.apache.commons.jcs3.io.ObjectInputStreamClassLoaderAware;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+import org.apache.commons.jcs3.utils.serialization.StandardSerializer;
+import org.apache.commons.jcs3.utils.struct.AbstractLRUMap;
+import org.apache.commons.jcs3.utils.struct.LRUMap;
+import org.apache.commons.jcs3.utils.timing.ElapsedTimer;
+
+/**
+ * This is responsible for storing the keys and the block numbers on disk
+ * where each key's data is stored.
+ */
+public class BlockDiskKeyStore<K>
+{
+    /**
+     * Class for recycling and LRU. This implements the LRU overflow callback,
+     * so we can mark the blocks as free.
+     */
+    public class LRUMapCountLimited extends LRUMap<K, int[]>
+    {
+        /**
+         * <code>TAG</code> tells us which map we are working on.
+         */
+        public final static String TAG = "orig-lru-count";
+
+        /**
+         * @param maxKeySize maximum number of keys to keep before evicting
+         */
+        public LRUMapCountLimited(final int maxKeySize)
+        {
+            super(maxKeySize);
+        }
+
+        /**
+         * This is called when the max key size is reached. The least recently
+         * used item will be passed here. We will store the position and size of
+         * the spot on disk in the recycle bin.
+         *
+         * @param key the evicted key
+         * @param value the block numbers to free
+         */
+        @Override
+        protected void processRemovedLRU(final K key, final int[] value)
+        {
+            blockDiskCache.freeBlocks(value);
+            if (log.isDebugEnabled())
+            {
+                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
+                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
+            }
+        }
+    }
+
+    /**
+     * Class for recycling and LRU. This implements the LRU size overflow
+     * callback, so we can mark the blocks as free.
+     */
+    public class LRUMapSizeLimited extends AbstractLRUMap<K, int[]>
+    {
+        /**
+         * <code>TAG</code> tells us which map we are working on.
+         */
+        public final static String TAG = "orig-lru-size";
+
+        // size of the content in kB
+        private final AtomicInteger contentSize;
+        private final int maxSize;
+
+        /**
+         * Default constructor. The content size is unlimited.
+         */
+        public LRUMapSizeLimited()
+        {
+            this(-1);
+        }
+
+        /**
+         * @param maxSize
+         *            maximum cache size in kB
+         */
+        public LRUMapSizeLimited(final int maxSize)
+        {
+            this.maxSize = maxSize;
+            this.contentSize = new AtomicInteger(0);
+        }
+
+        // keep the content size in kB, so 2^31 kB is a reasonable value
+        private void addLengthToCacheSize(final int[] value)
+        {
+            contentSize.addAndGet(value.length * blockSize / 1024 + 1);
+        }
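+
+        // Worked example of the accounting above (illustrative values, not
+        // from the source): with a 4096-byte blockSize, an element spanning
+        // 3 blocks adds 3 * 4096 / 1024 + 1 = 13 kB to contentSize. The +1 kB
+        // per entry rounds up deliberately, so the limit evicts slightly
+        // early rather than overshooting maxSize.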
+
+        /**
+         * This is called when the max key size is reached. The least recently
+         * used item will be passed here. We will store the position and size of
+         * the spot on disk in the recycle bin.
+         *
+         * @param key the evicted key
+         * @param value the block numbers to free
+         */
+        @Override
+        protected void processRemovedLRU(final K key, final int[] value)
+        {
+            blockDiskCache.freeBlocks(value);
+            if (log.isDebugEnabled())
+            {
+                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
+                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
+            }
+
+            if (value != null)
+            {
+                subLengthFromCacheSize(value);
+            }
+        }
+
+        @Override
+        public int[] put(final K key, final int[] value)
+        {
+            int[] oldValue = null;
+
+            try
+            {
+                oldValue = super.put(key, value);
+            }
+            finally
+            {
+                if (value != null)
+                {
+                    addLengthToCacheSize(value);
+                }
+                if (oldValue != null)
+                {
+                    subLengthFromCacheSize(oldValue);
+                }
+            }
+
+            return oldValue;
+        }
+
+        @Override
+        public int[] remove(final Object key)
+        {
+            int[] value = null;
+
+            try
+            {
+                value = super.remove(key);
+                return value;
+            }
+            finally
+            {
+                if (value != null)
+                {
+                    subLengthFromCacheSize(value);
+                }
+            }
+        }
+
+        @Override
+        protected boolean shouldRemove()
+        {
+            return maxSize > 0 && contentSize.get() > maxSize && this.size() > 1;
+        }
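+
+        // Note that eviction stops once a single entry remains (size() > 1
+        // above), so a lone oversized element is retained rather than
+        // emptying the map.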
+
+        // keep the content size in kB, so 2^31 kB is a reasonable value
+        private void subLengthFromCacheSize(final int[] value)
+        {
+            contentSize.addAndGet(value.length * blockSize / -1024 - 1);
+        }
+    }
+
+    /** The logger */
+    private static final Log log = LogManager.getLog(BlockDiskKeyStore.class);
+
+    /** Attributes governing the behavior of the block disk cache. */
+    private final BlockDiskCacheAttributes blockDiskCacheAttributes;
+
+    /** The key to block map */
+    private Map<K, int[]> keyHash;
+
+    /** The file where we persist the keys */
+    private final File keyFile;
+
+    /** The key file signature for new-style key files */
+    private final static int KEY_FILE_SIGNATURE = 0x6A63734B; // "jcsK"
+
+    /** The name to prefix log messages with. */
+    protected final String logCacheName;
+
+    /** Name of the file where we persist the keys */
+    private final String fileName;
+
+    /** The maximum number of keys to store in memory */
+    private final int maxKeySize;
+
+    /**
+     * We need this so we can communicate free blocks to the data store when
+     * keys fall off the LRU.
+     */
+    protected final BlockDiskCache<K, ?> blockDiskCache;
+
+    private DiskLimitType diskLimitType = DiskLimitType.COUNT;
+
+    private final int blockSize;
+
+    /**
+     * Serializer for reading and writing key file
+     */
+    private final IElementSerializer serializer;
+
+    /**
+     * Sets the configuration options.
+     *
+     * @param cacheAttributes the configuration for this key store
+     * @param blockDiskCache used for freeing blocks; may be null, in which
+     *            case a StandardSerializer is used for the key file
+     */
+    public BlockDiskKeyStore(final BlockDiskCacheAttributes cacheAttributes, final BlockDiskCache<K, ?> blockDiskCache)
+    {
+        this.blockDiskCacheAttributes = cacheAttributes;
+        this.logCacheName = "Region [" + this.blockDiskCacheAttributes.getCacheName() + "] ";
+        this.fileName = this.blockDiskCacheAttributes.getCacheName();
+        this.maxKeySize = cacheAttributes.getMaxKeySize();
+        this.blockDiskCache = blockDiskCache;
+        this.diskLimitType = cacheAttributes.getDiskLimitType();
+        this.blockSize = cacheAttributes.getBlockSizeBytes();
+
+        if (blockDiskCache == null)
+        {
+            this.serializer = new StandardSerializer();
+        }
+        else
+        {
+            this.serializer = blockDiskCache.getElementSerializer();
+        }
+
+        final File rootDirectory = cacheAttributes.getDiskPath();
+
+        log.info("{0}: Cache file root directory [{1}]", logCacheName, rootDirectory);
+
+        this.keyFile = new File(rootDirectory, fileName + ".key");
+
+        log.info("{0}: Key File [{1}]", logCacheName, this.keyFile.getAbsolutePath());
+
+        if (keyFile.length() > 0)
+        {
+            loadKeys();
+            if (!verify())
+            {
+                log.warn("{0}: Key File is invalid. Resetting file.", logCacheName);
+                initKeyMap();
+                reset();
+            }
+        }
+        else
+        {
+            initKeyMap();
+        }
+    }
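+
+    // A minimal construction sketch (hypothetical names and values, assuming
+    // the standard BlockDiskCacheAttributes setters):
+    //
+    //   final BlockDiskCacheAttributes attrs = new BlockDiskCacheAttributes();
+    //   attrs.setCacheName("testRegion");
+    //   attrs.setDiskPath("target/key-store-sandbox");
+    //   attrs.setMaxKeySize(1000);
+    //   final BlockDiskKeyStore<String> keyStore = new BlockDiskKeyStore<>(attrs, null);
+    //   // null blockDiskCache: keys are serialized with a StandardSerializer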
+
+    /**
+     * This is mainly used for testing. It leaves the disk intact and just
+     * clears memory.
+     */
+    protected void clearMemoryMap()
+    {
+        this.keyHash.clear();
+    }
+
+    /**
+     * Gets the entry set.
+     * <p>
+     *
+     * @return entry set.
+     */
+    public Set<Map.Entry<K, int[]>> entrySet()
+    {
+        return this.keyHash.entrySet();
+    }
+
+    /**
+     * Gets the block numbers for the key.
+     *
+     * @param key the key to look up
+     * @return the int[] of block numbers, or null if not present
+     */
+    public int[] get(final K key)
+    {
+        return this.keyHash.get(key);
+    }
+
+    /**
+     * Creates the map from keys to their block positions on disk.
+     */
+    private void initKeyMap()
+    {
+        keyHash = null;
+        if (maxKeySize >= 0)
+        {
+            if (this.diskLimitType == DiskLimitType.SIZE)
+            {
+                keyHash = new LRUMapSizeLimited(maxKeySize);
+            }
+            else
+            {
+                keyHash = new LRUMapCountLimited(maxKeySize);
+            }
+            log.info("{0}: Set maxKeySize to: \"{1}\"", logCacheName, maxKeySize);
+        }
+        else
+        {
+            // If no max size, use a plain map for memory and processing
+            // efficiency.
+            keyHash = new HashMap<>();
+            // keyHash = Collections.synchronizedMap( new HashMap() );
+            log.info("{0}: Set maxKeySize to unlimited", logCacheName);
+        }
+    }
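+
+    // Summary of the selection above: maxKeySize < 0 yields an unbounded
+    // HashMap; DiskLimitType.SIZE yields an LRUMapSizeLimited (maxKeySize
+    // read as kB of content); any other limit type yields an
+    // LRUMapCountLimited (maxKeySize read as a key count).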
+
+    /**
+     * Tests emptiness (size == 0).
+     *
+     * @return Whether or not this is empty.
+     * @since 3.1
+     */
+    public boolean isEmpty()
+    {
+        return size() == 0;
+    }
+
+    /**
+     * Gets the key set.
+     * <p>
+     *
+     * @return key set.
+     */
+    public Set<K> keySet()
+    {
+        return this.keyHash.keySet();
+    }
+
+    /**
+     * Loads the keys from the .key file. The keys are stored individually on
+     * disk. They are added one by one to an LRUMap.
+     */
+    protected void loadKeys()
+    {
+        log.info("{0}: Loading keys for {1}", () -> logCacheName, keyFile::toString);
+
+        // create a key map to use.
+        initKeyMap();
+
+        final HashMap<K, int[]> keys = new HashMap<>();
+
+        synchronized (keyFile)
+        {
+            // Check file type
+            int fileSignature = 0;
+
+            try (FileChannel bc = FileChannel.open(keyFile.toPath(), StandardOpenOption.READ))
+            {
+                final ByteBuffer signature = ByteBuffer.allocate(4);
+                bc.read(signature);
+                signature.flip();
+                fileSignature = signature.getInt();
+
+                if (fileSignature == KEY_FILE_SIGNATURE)
+                {
+                    while (true)
+                    {
+                        try
+                        {
+                            final BlockDiskElementDescriptor<K> descriptor =
+                                    serializer.deSerializeFrom(bc, null);
+                            if (descriptor != null)
+                            {
+                                keys.put(descriptor.getKey(), descriptor.getBlocks());
+                            }
+                        }
+                        catch (EOFException e)
+                        {
+                            break;
+                        }
+                    }
+                }
+            }
+            catch (final IOException | ClassNotFoundException e)
+            {
+                log.error("{0}: Problem loading keys for file {1}", logCacheName, fileName, e);
+            }
+
+            if (fileSignature != KEY_FILE_SIGNATURE)
+            {
+                try (InputStream fis = Files.newInputStream(keyFile.toPath());
+                     ObjectInputStream ois = new ObjectInputStreamClassLoaderAware(fis, null))
+                {
+                    while (true)
+                    {
+                        @SuppressWarnings("unchecked")
+                        final
+                        // Need to cast from Object
+                        BlockDiskElementDescriptor<K> descriptor = (BlockDiskElementDescriptor<K>) ois.readObject();
+                        if (descriptor != null)
+                        {
+                            keys.put(descriptor.getKey(), descriptor.getBlocks());
+                        }
+                    }
+                }
+                catch (final EOFException eof)
+                {
+                    // nothing
+                }
+                catch (final IOException | ClassNotFoundException e)
+                {
+                    log.error("{0}: Problem loading keys (old style) for file {1}", logCacheName, fileName, e);
+                }
+            }
+        }
+
+        if (!keys.isEmpty())
+        {
+            keyHash.putAll(keys);
+
+            log.debug("{0}: Found {1} in keys file.", () -> logCacheName, keys::size);
+            log.info("{0}: Loaded keys from [{1}], key count: {2}; up to {3} will be available.",
+                    () -> logCacheName, () -> fileName, this::size,
+                    () -> maxKeySize);
+        }
+    }
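+
+    // Key file layout, as read above: new-style files begin with the 4-byte
+    // KEY_FILE_SIGNATURE followed by serialized BlockDiskElementDescriptor
+    // entries until EOF. Any other leading bytes (e.g. the 0xACED stream
+    // magic of a legacy java.io serialization file) send us to the
+    // old-style ObjectInputStream fallback.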
+
+    /**
+     * Puts an int[] in the key store.
+     *
+     * @param key the key to store
+     * @param value the block numbers for the key
+     */
+    public void put(final K key, final int[] value)
+    {
+        this.keyHash.put(key, value);
+    }
+
+    /**
+     * Removes by key.
+     *
+     * @param key the key to remove
+     * @return the int[] of block numbers if the key was present, else null
+     */
+    public int[] remove(final K key)
+    {
+        return this.keyHash.remove(key);
+    }
+
+    /**
+     * Resets the file and creates a new key map.
+     */
+    protected void reset()
+    {
+        synchronized (keyFile)
+        {
+            clearMemoryMap();
+            saveKeys();
+        }
+    }
+
+    /**
+     * Saves the key file to disk. This gets the LRUMap entry set and writes
+     * the entries out one by one after putting them in a wrapper.
+     */
+    protected void saveKeys()
+    {
+        final ElapsedTimer timer = new ElapsedTimer();
+        log.info("{0}: Saving keys to [{1}], key count [{2}]", () -> logCacheName,
+                this.keyFile::getAbsolutePath, this::size);
+
+        synchronized (keyFile)
+        {
+            try (FileChannel bc = FileChannel.open(keyFile.toPath(),
+                    StandardOpenOption.CREATE,
+                    StandardOpenOption.WRITE,
+                    StandardOpenOption.TRUNCATE_EXISTING))
+            {
+                if (!verify())
+                {
+                    throw new IOException("Inconsistent key file");
+                }
+
+                // Write signature to distinguish old format from new one
+                ByteBuffer signature = ByteBuffer.allocate(4);
+                signature.putInt(KEY_FILE_SIGNATURE).flip();
+                bc.write(signature);
+
+                // don't need to synchronize, since the underlying
+                // collection makes a copy
+                for (final Map.Entry<K, int[]> entry : keyHash.entrySet())
+                {
+                    final BlockDiskElementDescriptor<K> descriptor =
+                            new BlockDiskElementDescriptor<>(entry.getKey(),entry.getValue());
+                    // stream these out in the loop.
+                    serializer.serializeTo(descriptor, bc);
+                }
+            }
+            catch (final IOException e)
+            {
+                log.error("{0}: Problem storing keys.", logCacheName, e);
+            }
+        }
+
+        log.info("{0}: Finished saving keys. It took {1} to store {2} keys. Key file length [{3}]",
+                () -> logCacheName, timer::getElapsedTimeString, this::size,
+                keyFile::length);
+    }
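+
+    // saveKeys() always rewrites the whole file while holding the keyFile
+    // lock: verify() guards against persisting an inconsistent block
+    // allocation, TRUNCATE_EXISTING discards the previous contents, and the
+    // signature is written first so loadKeys() can tell the formats apart.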
+
+    /**
+     * Gets the size of the key hash.
+     * <p>
+     *
+     * @return the number of keys.
+     */
+    public int size()
+    {
+        return this.keyHash.size();
+    }
+
+    /**
+     * Verifies key store integrity.
+     *
+     * @return true if key store is valid
+     */
+    private boolean verify()
+    {
+        final Map<Integer, Set<K>> blockAllocationMap = new TreeMap<>();
+        for (final Entry<K, int[]> e : keyHash.entrySet())
+        {
+            for (final int block : e.getValue())
+            {
+                Set<K> keys = blockAllocationMap.computeIfAbsent(block, s -> new HashSet<>());
+                if (!keys.isEmpty() && !log.isTraceEnabled())
+                {
+                    // block already used by another key, and trace is off - fail fast
+                    return false;
+                }
+                keys.add(e.getKey());
+            }
+        }
+        if (!log.isTraceEnabled())
+        {
+            return true;
+        }
+        boolean ok = true;
+        for (final Entry<Integer, Set<K>> e : blockAllocationMap.entrySet())
+        {
+            log.trace("Block {0}: {1}", e::getKey, e::getValue);
+            if (e.getValue().size() > 1)
+            {
+                ok = false;
+            }
+        }
+        return ok;
+    }
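+
+    // verify() treats a block claimed by more than one key as corruption.
+    // Without trace logging it fails fast on the first duplicate; with trace
+    // logging it maps every block to its claimants and logs them all before
+    // returning the result.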
+}
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCache.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCache.java
index 4cdcb189..35e59077 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCache.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCache.java
@@ -1,808 +1,807 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import javax.sql.DataSource;
-
-import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
-import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
-import org.apache.commons.jcs3.engine.behavior.ICache;
-import org.apache.commons.jcs3.engine.behavior.ICacheElement;
-import org.apache.commons.jcs3.engine.logging.behavior.ICacheEvent;
-import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
-import org.apache.commons.jcs3.engine.stats.StatElement;
-import org.apache.commons.jcs3.engine.stats.behavior.IStatElement;
-import org.apache.commons.jcs3.engine.stats.behavior.IStats;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-
-/**
- * This is the jdbc disk cache plugin.
- * <p>
- * It expects a table created by the following script. The table name is configurable.
- * <p>
- *
- * <pre>
- *                       drop TABLE JCS_STORE;
- *                       CREATE TABLE JCS_STORE
- *                       (
- *                       CACHE_KEY                  VARCHAR(250)          NOT NULL,
- *                       REGION                     VARCHAR(250)          NOT NULL,
- *                       ELEMENT                    BLOB,
- *                       CREATE_TIME                TIMESTAMP,
- *                       UPDATE_TIME_SECONDS        BIGINT,
- *                       MAX_LIFE_SECONDS           BIGINT,
- *                       SYSTEM_EXPIRE_TIME_SECONDS BIGINT,
- *                       IS_ETERNAL                 CHAR(1),
- *                       PRIMARY KEY (CACHE_KEY, REGION)
- *                       );
- * </pre>
- * <p>
- * The cleanup thread will delete non eternal items where (now - create time) &gt; max life seconds *
- * 1000
- * <p>
- * To speed up the deletion the SYSTEM_EXPIRE_TIME_SECONDS is used instead. It is recommended that
- * an index be created on this column is you will have over a million records.
- * <p>
- * @author Aaron Smuts
- */
-public class JDBCDiskCache<K, V>
-    extends AbstractDiskCache<K, V>
-{
-    /** The local logger. */
-    private static final Log log = LogManager.getLog( JDBCDiskCache.class );
-
-    /** configuration */
-    private JDBCDiskCacheAttributes jdbcDiskCacheAttributes;
-
-    /** # of times update was called */
-    private final AtomicInteger updateCount = new AtomicInteger(0);
-
-    /** # of times get was called */
-    private final AtomicInteger getCount = new AtomicInteger(0);
-
-    /** # of times getMatching was called */
-    private final AtomicInteger getMatchingCount = new AtomicInteger(0);
-
-    /** db connection pool */
-    private final DataSourceFactory dsFactory;
-
-    /** tracks optimization */
-    private TableState tableState;
-
-    /**
-     * Constructs a JDBC Disk Cache for the provided cache attributes. The table state object is
-     * used to mark deletions.
-     * <p>
-     * @param cattr the configuration object for this cache
-     * @param dsFactory the DataSourceFactory for this cache
-     * @param tableState an object to track table operations
-     */
-    public JDBCDiskCache(final JDBCDiskCacheAttributes cattr, final DataSourceFactory dsFactory, final TableState tableState)
-    {
-        super( cattr );
-
-        setTableState( tableState );
-        setJdbcDiskCacheAttributes( cattr );
-
-        log.info( "jdbcDiskCacheAttributes = {0}", this::getJdbcDiskCacheAttributes);
-
-        // This initializes the pool access.
-        this.dsFactory = dsFactory;
-
-        // Initialization finished successfully, so set alive to true.
-        setAlive(true);
-    }
-
-    /**
-     * Inserts or updates. By default it will try to insert. If the item exists we will get an
-     * error. It will then update. This behavior is configurable. The cache can be configured to
-     * check before inserting.
-     * <p>
-     * @param ce
-     */
-    @Override
-    protected void processUpdate( final ICacheElement<K, V> ce )
-    {
-    	updateCount.incrementAndGet();
-
-        log.debug( "updating, ce = {0}", ce );
-
-        try (Connection con = getDataSource().getConnection())
-        {
-            log.debug( "Putting [{0}] on disk.", ce::getKey);
-
-            try
-            {
-                final byte[] element = getElementSerializer().serialize( ce );
-                insertOrUpdate( ce, con, element );
-            }
-            catch ( final IOException e )
-            {
-                log.error( "Could not serialize element", e );
-            }
-        }
-        catch ( final SQLException e )
-        {
-            log.error( "Problem getting connection.", e );
-        }
-    }
-
-    /**
-     * If test before insert it true, we check to see if the element exists. If the element exists
-     * we will update. Otherwise, we try inserting.  If this fails because the item exists, we will
-     * update.
-     * <p>
-     * @param ce
-     * @param con
-     * @param element
-     */
-    private void insertOrUpdate( final ICacheElement<K, V> ce, final Connection con, final byte[] element )
-    {
-        boolean exists = false;
-
-        // First do a query to determine if the element already exists
-        if ( this.getJdbcDiskCacheAttributes().isTestBeforeInsert() )
-        {
-            exists = doesElementExist( ce, con );
-        }
-
-        // If it doesn't exist, insert it, otherwise update
-        if ( !exists )
-        {
-            exists = insertRow( ce, con, element );
-        }
-
-        // update if it exists.
-        if ( exists )
-        {
-            updateRow( ce, con, element );
-        }
-    }
-
-    /**
-     * This inserts a new row in the database.
-     * <p>
-     * @param ce
-     * @param con
-     * @param element
-     * @return true if the insertion fails because the record exists.
-     */
-    private boolean insertRow( final ICacheElement<K, V> ce, final Connection con, final byte[] element )
-    {
-        boolean exists = false;
-        final String sqlI = String.format("insert into %s"
-                + " (CACHE_KEY, REGION, ELEMENT, MAX_LIFE_SECONDS, IS_ETERNAL, CREATE_TIME, UPDATE_TIME_SECONDS,"
-                + " SYSTEM_EXPIRE_TIME_SECONDS) "
-                + " values (?, ?, ?, ?, ?, ?, ?, ?)", getJdbcDiskCacheAttributes().getTableName());
-
-        try (PreparedStatement psInsert = con.prepareStatement( sqlI ))
-        {
-            psInsert.setString( 1, ce.getKey().toString() );
-            psInsert.setString( 2, this.getCacheName() );
-            psInsert.setBytes( 3, element );
-            psInsert.setLong( 4, ce.getElementAttributes().getMaxLife() );
-            psInsert.setString( 5, ce.getElementAttributes().getIsEternal() ? "T" : "F" );
-
-            final Timestamp createTime = new Timestamp( ce.getElementAttributes().getCreateTime() );
-            psInsert.setTimestamp( 6, createTime );
-
-            final long now = System.currentTimeMillis() / 1000;
-            psInsert.setLong( 7, now );
-
-            final long expireTime = now + ce.getElementAttributes().getMaxLife();
-            psInsert.setLong( 8, expireTime );
-
-            psInsert.execute();
-        }
-        catch ( final SQLException e )
-        {
-            if ("23000".equals(e.getSQLState()))
-            {
-                exists = true;
-            }
-            else
-            {
-                log.error( "Could not insert element", e );
-            }
-
-            // see if it exists, if we didn't already
-            if ( !exists && !this.getJdbcDiskCacheAttributes().isTestBeforeInsert() )
-            {
-                exists = doesElementExist( ce, con );
-            }
-        }
-
-        return exists;
-    }
-
-    /**
-     * This updates a row in the database.
-     * <p>
-     * @param ce
-     * @param con
-     * @param element
-     */
-    private void updateRow( final ICacheElement<K, V> ce, final Connection con, final byte[] element )
-    {
-        final String sqlU = String.format("update %s"
-                + " set ELEMENT  = ?, CREATE_TIME = ?, UPDATE_TIME_SECONDS = ?, " + " SYSTEM_EXPIRE_TIME_SECONDS = ? "
-                + " where CACHE_KEY = ? and REGION = ?", getJdbcDiskCacheAttributes().getTableName());
-
-        try (PreparedStatement psUpdate = con.prepareStatement( sqlU ))
-        {
-            psUpdate.setBytes( 1, element );
-
-            final Timestamp createTime = new Timestamp( ce.getElementAttributes().getCreateTime() );
-            psUpdate.setTimestamp( 2, createTime );
-
-            final long now = System.currentTimeMillis() / 1000;
-            psUpdate.setLong( 3, now );
-
-            final long expireTime = now + ce.getElementAttributes().getMaxLife();
-            psUpdate.setLong( 4, expireTime );
-
-            psUpdate.setString( 5, (String) ce.getKey() );
-            psUpdate.setString( 6, this.getCacheName() );
-            psUpdate.execute();
-
-            log.debug( "ran update {0}", sqlU );
-        }
-        catch ( final SQLException e )
-        {
-            log.error( "Error executing update sql [{0}]", sqlU, e );
-        }
-    }
-
-    /**
-     * Does an element exist for this key?
-     * <p>
-     * @param ce the cache element
-     * @param con a database connection
-     * @return boolean
-     */
-    protected boolean doesElementExist( final ICacheElement<K, V> ce, final Connection con )
-    {
-        boolean exists = false;
-        // don't select the element, since we want this to be fast.
-        final String sqlS = String.format("select CACHE_KEY from %s where REGION = ? and CACHE_KEY = ?",
-                getJdbcDiskCacheAttributes().getTableName());
-
-        try (PreparedStatement psSelect = con.prepareStatement( sqlS ))
-        {
-            psSelect.setString( 1, this.getCacheName() );
-            psSelect.setString( 2, (String) ce.getKey() );
-
-            try (ResultSet rs = psSelect.executeQuery())
-            {
-                exists = rs.next();
-            }
-
-            log.debug( "[{0}] existing status is {1}", ce.getKey(), exists );
-        }
-        catch ( final SQLException e )
-        {
-            log.error( "Problem looking for item before insert.", e );
-        }
-
-        return exists;
-    }
-
-    /**
-     * Queries the database for the value. If it gets a result, the value is deserialized.
-     * <p>
-     * @param key
-     * @return ICacheElement
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#get(Object)
-     */
-    @Override
-    protected ICacheElement<K, V> processGet( final K key )
-    {
-    	getCount.incrementAndGet();
-
-        log.debug( "Getting [{0}] from disk", key );
-
-        if ( !isAlive() )
-        {
-            return null;
-        }
-
-        ICacheElement<K, V> obj = null;
-
-        // region, key
-        final String selectString = String.format("select ELEMENT from %s where REGION = ? and CACHE_KEY = ?",
-                getJdbcDiskCacheAttributes().getTableName());
-
-        try (Connection con = getDataSource().getConnection())
-        {
-            try (PreparedStatement psSelect = con.prepareStatement( selectString ))
-            {
-                psSelect.setString( 1, this.getCacheName() );
-                psSelect.setString( 2, key.toString() );
-
-                try (ResultSet rs = psSelect.executeQuery())
-                {
-                    byte[] data = null;
-
-                    if ( rs.next() )
-                    {
-                        data = rs.getBytes( 1 );
-                    }
-
-                    if ( data != null )
-                    {
-                        try
-                        {
-                            // USE THE SERIALIZER
-                            obj = getElementSerializer().deSerialize( data, null );
-                        }
-                        catch ( final IOException | ClassNotFoundException e )
-                        {
-                            log.error( "Problem getting item for key [{0}]", key, e );
-                        }
-                    }
-                }
-            }
-        }
-        catch ( final SQLException sqle )
-        {
-            log.error( "Caught a SQL exception trying to get the item for key [{0}]",
-                    key, sqle );
-        }
-
-        return obj;
-    }
-
-    /**
-     * This will run a like query. It will try to construct a usable query but different
-     * implementations will be needed to adjust the syntax.
-     * <p>
-     * @param pattern
-     * @return key,value map
-     */
-    @Override
-    protected Map<K, ICacheElement<K, V>> processGetMatching( final String pattern )
-    {
-    	getMatchingCount.incrementAndGet();
-
-        log.debug( "Getting [{0}] from disk", pattern);
-
-        if ( !isAlive() )
-        {
-            return null;
-        }
-
-        final Map<K, ICacheElement<K, V>> results = new HashMap<>();
-
-        // region, key
-        final String selectString = String.format("select ELEMENT from %s where REGION = ? and CACHE_KEY like ?",
-                getJdbcDiskCacheAttributes().getTableName());
-
-        try (Connection con = getDataSource().getConnection())
-        {
-            try (PreparedStatement psSelect = con.prepareStatement( selectString ))
-            {
-                psSelect.setString( 1, this.getCacheName() );
-                psSelect.setString( 2, constructLikeParameterFromPattern( pattern ) );
-
-                try (ResultSet rs = psSelect.executeQuery())
-                {
-                    while ( rs.next() )
-                    {
-                        final byte[] data = rs.getBytes(1);
-                        if ( data != null )
-                        {
-                            try
-                            {
-                                // USE THE SERIALIZER
-                                final ICacheElement<K, V> value = getElementSerializer().deSerialize( data, null );
-                                results.put( value.getKey(), value );
-                            }
-                            catch ( final IOException | ClassNotFoundException e )
-                            {
-                                log.error( "Problem getting items for pattern [{0}]", pattern, e );
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        catch ( final SQLException sqle )
-        {
-            log.error( "Caught a SQL exception trying to get items for pattern [{0}]",
-                    pattern, sqle );
-        }
-
-        return results;
-    }
-
-    /**
-     * @param pattern
-     * @return String to use in the like query.
-     */
-    public String constructLikeParameterFromPattern( final String pattern )
-    {
-        String likePattern = pattern.replaceAll( "\\.\\+", "%" );
-        likePattern = likePattern.replaceAll( "\\.", "_" );
-
-        log.debug( "pattern = [{0}]", likePattern );
-
-        return likePattern;
-    }
-
-    /**
-     * Returns true if the removal was successful; or false if there is nothing to remove. Current
-     * implementation always results in a disk orphan.
-     * <p>
-     * @param key
-     * @return boolean
-     */
-    @Override
-    protected boolean processRemove( final K key )
-    {
-        // remove single item.
-        final String sqlSingle = String.format("delete from %s where REGION = ? and CACHE_KEY = ?",
-                getJdbcDiskCacheAttributes().getTableName());
-        // remove all keys of the same name group.
-        final String sqlPartial = String.format("delete from %s where REGION = ? and CACHE_KEY like ?",
-                getJdbcDiskCacheAttributes().getTableName());
-
-        try (Connection con = getDataSource().getConnection())
-        {
-            boolean partial = key.toString().endsWith(ICache.NAME_COMPONENT_DELIMITER);
-            String sql = partial ? sqlPartial : sqlSingle;
-
-            try (PreparedStatement psSelect = con.prepareStatement(sql))
-            {
-                psSelect.setString( 1, this.getCacheName() );
-                if ( partial )
-                {
-                    psSelect.setString( 2, key.toString() + "%" );
-                }
-                else
-                {
-                    psSelect.setString( 2, key.toString() );
-                }
-
-                psSelect.executeUpdate();
-
-                setAlive(true);
-            }
-            catch ( final SQLException e )
-            {
-                log.error( "Problem creating statement. sql [{0}]", sql, e );
-                setAlive(false);
-            }
-        }
-        catch ( final SQLException e )
-        {
-            log.error( "Problem updating cache.", e );
-            reset();
-        }
-        return false;
-    }
-
-    /**
-     * This should remove all elements. The auxiliary can be configured to forbid this behavior. If
-     * remove all is not allowed, the method balks.
-     */
-    @Override
-    protected void processRemoveAll()
-    {
-        // it should never get here from the abstract disk cache.
-        if ( this.jdbcDiskCacheAttributes.isAllowRemoveAll() )
-        {
-            final String sql = String.format("delete from %s where REGION = ?",
-                    getJdbcDiskCacheAttributes().getTableName());
-
-            try (Connection con = getDataSource().getConnection())
-            {
-                try (PreparedStatement psDelete = con.prepareStatement( sql ))
-                {
-                    psDelete.setString( 1, this.getCacheName() );
-                    setAlive(true);
-                    psDelete.executeUpdate();
-                }
-                catch ( final SQLException e )
-                {
-                    log.error( "Problem creating statement.", e );
-                    setAlive(false);
-                }
-            }
-            catch ( final SQLException e )
-            {
-                log.error( "Problem removing all.", e );
-                reset();
-            }
-        }
-        else
-        {
-            log.info( "RemoveAll was requested but the request was not fulfilled: "
-                    + "allowRemoveAll is set to false." );
-        }
-    }
-
-    /**
-     * Removed the expired. (now - create time) &gt; max life seconds * 1000
-     * <p>
-     * @return the number deleted
-     */
-    protected int deleteExpired()
-    {
-        int deleted = 0;
-
-        try (Connection con = getDataSource().getConnection())
-        {
-            // The shrinker thread might kick in before the table is created
-            // So check if the table exists first
-            final DatabaseMetaData dmd = con.getMetaData();
-            final ResultSet result = dmd.getTables(null, null,
-                    getJdbcDiskCacheAttributes().getTableName(), null);
-
-            if (result.next())
-            {
-                getTableState().setState( TableState.DELETE_RUNNING );
-                final long now = System.currentTimeMillis() / 1000;
-
-                final String sql = String.format("delete from %s where IS_ETERNAL = ? and REGION = ?"
-                        + " and ? > SYSTEM_EXPIRE_TIME_SECONDS", getJdbcDiskCacheAttributes().getTableName());
-
-                try (PreparedStatement psDelete = con.prepareStatement( sql ))
-                {
-                    psDelete.setString( 1, "F" );
-                    psDelete.setString( 2, this.getCacheName() );
-                    psDelete.setLong( 3, now );
-
-                    setAlive(true);
-
-                    deleted = psDelete.executeUpdate();
-                }
-                catch ( final SQLException e )
-                {
-                    log.error( "Problem creating statement.", e );
-                    setAlive(false);
-                }
-
-                logApplicationEvent( getAuxiliaryCacheAttributes().getName(), "deleteExpired",
-                                     "Deleted expired elements.  URL: " + getDiskLocation() );
-            }
-            else
-            {
-                log.warn( "Trying to shrink non-existing table [{0}]",
-                        getJdbcDiskCacheAttributes().getTableName() );
-            }
-        }
-        catch ( final SQLException e )
-        {
-            logError( getAuxiliaryCacheAttributes().getName(), "deleteExpired",
-                    e.getMessage() + " URL: " + getDiskLocation() );
-            log.error( "Problem removing expired elements from the table.", e );
-            reset();
-        }
-        finally
-        {
-            getTableState().setState( TableState.FREE );
-        }
-
-        return deleted;
-    }
-
-    /**
-     * Typically this is used to handle errors by last resort, force content update, or removeall
-     */
-    public void reset()
-    {
-        // nothing
-    }
-
-    /** Shuts down the pool */
-    @Override
-    public void processDispose()
-    {
-        final ICacheEvent<K> cacheEvent = createICacheEvent( getCacheName(), null, ICacheEventLogger.DISPOSE_EVENT );
-
-        try
-        {
-        	dsFactory.close();
-        }
-        catch ( final SQLException e )
-        {
-            log.error( "Problem shutting down.", e );
-        }
-        finally
-        {
-            logICacheEvent( cacheEvent );
-        }
-    }
-
-    /**
-     * Returns the current cache size. Just does a count(*) for the region.
-     * <p>
-     * @return The size value
-     */
-    @Override
-    public int getSize()
-    {
-        int size = 0;
-
-        // region, key
-        final String selectString = String.format("select count(*) from %s where REGION = ?",
-                getJdbcDiskCacheAttributes().getTableName());
-
-        try (Connection con = getDataSource().getConnection())
-        {
-            try (PreparedStatement psSelect = con.prepareStatement( selectString ))
-            {
-                psSelect.setString( 1, this.getCacheName() );
-
-                try (ResultSet rs = psSelect.executeQuery())
-                {
-                    if ( rs.next() )
-                    {
-                        size = rs.getInt( 1 );
-                    }
-                }
-            }
-        }
-        catch ( final SQLException e )
-        {
-            log.error( "Problem getting size.", e );
-        }
-
-        return size;
-    }
-
-    /**
-     * Return the keys in this cache.
-     * <p>
-     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#getKeySet()
-     */
-    @Override
-    public Set<K> getKeySet() throws IOException
-    {
-        throw new UnsupportedOperationException( "Groups not implemented." );
-        // return null;
-    }
-
-    /**
-     * @param jdbcDiskCacheAttributes The jdbcDiskCacheAttributes to set.
-     */
-    protected void setJdbcDiskCacheAttributes( final JDBCDiskCacheAttributes jdbcDiskCacheAttributes )
-    {
-        this.jdbcDiskCacheAttributes = jdbcDiskCacheAttributes;
-    }
-
-    /**
-     * @return Returns the jdbcDiskCacheAttributes.
-     */
-    protected JDBCDiskCacheAttributes getJdbcDiskCacheAttributes()
-    {
-        return jdbcDiskCacheAttributes;
-    }
-
-    /**
-     * @return Returns the AuxiliaryCacheAttributes.
-     */
-    @Override
-    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
-    {
-        return this.getJdbcDiskCacheAttributes();
-    }
-
-    /**
-     * Extends the parent stats.
-     * <p>
-     * @return IStats
-     */
-    @Override
-    public IStats getStatistics()
-    {
-        final IStats stats = super.getStatistics();
-        stats.setTypeName( "JDBC/Abstract Disk Cache" );
-
-        final List<IStatElement<?>> elems = stats.getStatElements();
-
-        elems.add(new StatElement<>( "Update Count", updateCount ) );
-        elems.add(new StatElement<>( "Get Count", getCount ) );
-        elems.add(new StatElement<>( "Get Matching Count", getMatchingCount ) );
-        elems.add(new StatElement<>( "DB URL", getJdbcDiskCacheAttributes().getUrl()) );
-
-        stats.setStatElements( elems );
-
-        return stats;
-    }
-
-    /**
-     * Returns the name of the table.
-     * <p>
-     * @return the table name or UNDEFINED
-     */
-    protected String getTableName()
-    {
-        String name = "UNDEFINED";
-        if ( this.getJdbcDiskCacheAttributes() != null )
-        {
-            name = this.getJdbcDiskCacheAttributes().getTableName();
-        }
-        return name;
-    }
-
-    /**
-     * @param tableState The tableState to set.
-     */
-    public void setTableState( final TableState tableState )
-    {
-        this.tableState = tableState;
-    }
-
-    /**
-     * @return Returns the tableState.
-     */
-    public TableState getTableState()
-    {
-        return tableState;
-    }
-
-    /**
-     * This is used by the event logging.
-     * <p>
-     * @return the location of the disk, either path or ip.
-     */
-    @Override
-    protected String getDiskLocation()
-    {
-        return this.jdbcDiskCacheAttributes.getUrl();
-    }
-
-    /**
-     * Public so managers can access it.
-     * @return the dsFactory
-     * @throws SQLException if getting a data source fails
-     */
-    public DataSource getDataSource() throws SQLException
-    {
-        return dsFactory.getDataSource();
-    }
-
-    /**
-     * For debugging.
-     * <p>
-     * @return this.getStats();
-     */
-    @Override
-    public String toString()
-    {
-        return this.getStats();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
+import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
+import org.apache.commons.jcs3.engine.behavior.ICache;
+import org.apache.commons.jcs3.engine.behavior.ICacheElement;
+import org.apache.commons.jcs3.engine.logging.behavior.ICacheEvent;
+import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
+import org.apache.commons.jcs3.engine.stats.StatElement;
+import org.apache.commons.jcs3.engine.stats.behavior.IStatElement;
+import org.apache.commons.jcs3.engine.stats.behavior.IStats;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+
+/**
+ * This is the jdbc disk cache plugin.
+ * <p>
+ * It expects a table created by the following script. The table name is configurable.
+ * </p>
+ * <pre>
+ *                       drop TABLE JCS_STORE;
+ *                       CREATE TABLE JCS_STORE
+ *                       (
+ *                       CACHE_KEY                  VARCHAR(250)          NOT NULL,
+ *                       REGION                     VARCHAR(250)          NOT NULL,
+ *                       ELEMENT                    BLOB,
+ *                       CREATE_TIME                TIMESTAMP,
+ *                       UPDATE_TIME_SECONDS        BIGINT,
+ *                       MAX_LIFE_SECONDS           BIGINT,
+ *                       SYSTEM_EXPIRE_TIME_SECONDS BIGINT,
+ *                       IS_ETERNAL                 CHAR(1),
+ *                       PRIMARY KEY (CACHE_KEY, REGION)
+ *                       );
+ * </pre>
+ * <p>
+ * The cleanup thread will delete non-eternal items where (now - create time) &gt; max life seconds *
+ * 1000.
+ * </p>
+ * <p>
+ * To speed up the deletion, the SYSTEM_EXPIRE_TIME_SECONDS column is used instead. It is
+ * recommended that an index be created on this column if you will have over a million records.
+ * </p>
+ */
+public class JDBCDiskCache<K, V>
+    extends AbstractDiskCache<K, V>
+{
+    /** The local logger. */
+    private static final Log log = LogManager.getLog( JDBCDiskCache.class );
+
+    /** configuration */
+    private JDBCDiskCacheAttributes jdbcDiskCacheAttributes;
+
+    /** # of times update was called */
+    private final AtomicInteger updateCount = new AtomicInteger(0);
+
+    /** # of times get was called */
+    private final AtomicInteger getCount = new AtomicInteger(0);
+
+    /** # of times getMatching was called */
+    private final AtomicInteger getMatchingCount = new AtomicInteger(0);
+
+    /** db connection pool */
+    private final DataSourceFactory dsFactory;
+
+    /** tracks optimization */
+    private TableState tableState;
+
+    /**
+     * Constructs a JDBC Disk Cache for the provided cache attributes. The table state object is
+     * used to mark deletions.
+     * <p>
+     * @param cattr the configuration object for this cache
+     * @param dsFactory the DataSourceFactory for this cache
+     * @param tableState an object to track table operations
+     */
+    public JDBCDiskCache(final JDBCDiskCacheAttributes cattr, final DataSourceFactory dsFactory, final TableState tableState)
+    {
+        super( cattr );
+
+        setTableState( tableState );
+        setJdbcDiskCacheAttributes( cattr );
+
+        log.info( "jdbcDiskCacheAttributes = {0}", this::getJdbcDiskCacheAttributes);
+
+        // This initializes the pool access.
+        this.dsFactory = dsFactory;
+
+        // Initialization finished successfully, so set alive to true.
+        setAlive(true);
+    }
+
+    /**
+     * Inserts or updates. By default it tries to insert; if the item already exists, the insert
+     * fails and it updates instead. This behavior is configurable: the cache can be configured to
+     * check for existence before inserting.
+     * <p>
+     * @param ce
+     */
+    @Override
+    protected void processUpdate( final ICacheElement<K, V> ce )
+    {
+    	updateCount.incrementAndGet();
+
+        log.debug( "updating, ce = {0}", ce );
+
+        try (Connection con = getDataSource().getConnection())
+        {
+            log.debug( "Putting [{0}] on disk.", ce::getKey);
+
+            try
+            {
+                final byte[] element = getElementSerializer().serialize( ce );
+                insertOrUpdate( ce, con, element );
+            }
+            catch ( final IOException e )
+            {
+                log.error( "Could not serialize element", e );
+            }
+        }
+        catch ( final SQLException e )
+        {
+            log.error( "Problem getting connection.", e );
+        }
+    }
+
+    /**
+     * If test before insert is true, we check whether the element exists and update it if so.
+     * Otherwise, we try inserting; if the insert fails because the item exists, we update.
+     * <p>
+     * @param ce
+     * @param con
+     * @param element
+     */
+    private void insertOrUpdate( final ICacheElement<K, V> ce, final Connection con, final byte[] element )
+    {
+        boolean exists = false;
+
+        // First do a query to determine if the element already exists
+        if ( this.getJdbcDiskCacheAttributes().isTestBeforeInsert() )
+        {
+            exists = doesElementExist( ce, con );
+        }
+
+        // If it doesn't exist, insert it, otherwise update
+        if ( !exists )
+        {
+            exists = insertRow( ce, con, element );
+        }
+
+        // update if it exists.
+        if ( exists )
+        {
+            updateRow( ce, con, element );
+        }
+    }
+
+    /**
+     * This inserts a new row in the database.
+     * <p>
+     * @param ce
+     * @param con
+     * @param element
+     * @return true if the insertion fails because the record exists.
+     */
+    private boolean insertRow( final ICacheElement<K, V> ce, final Connection con, final byte[] element )
+    {
+        boolean exists = false;
+        final String sqlI = String.format("insert into %s"
+                + " (CACHE_KEY, REGION, ELEMENT, MAX_LIFE_SECONDS, IS_ETERNAL, CREATE_TIME, UPDATE_TIME_SECONDS,"
+                + " SYSTEM_EXPIRE_TIME_SECONDS) "
+                + " values (?, ?, ?, ?, ?, ?, ?, ?)", getJdbcDiskCacheAttributes().getTableName());
+
+        try (PreparedStatement psInsert = con.prepareStatement( sqlI ))
+        {
+            psInsert.setString( 1, ce.getKey().toString() );
+            psInsert.setString( 2, this.getCacheName() );
+            psInsert.setBytes( 3, element );
+            psInsert.setLong( 4, ce.getElementAttributes().getMaxLife() );
+            psInsert.setString( 5, ce.getElementAttributes().getIsEternal() ? "T" : "F" );
+
+            final Timestamp createTime = new Timestamp( ce.getElementAttributes().getCreateTime() );
+            psInsert.setTimestamp( 6, createTime );
+
+            final long now = System.currentTimeMillis() / 1000;
+            psInsert.setLong( 7, now );
+
+            final long expireTime = now + ce.getElementAttributes().getMaxLife();
+            psInsert.setLong( 8, expireTime );
+
+            psInsert.execute();
+        }
+        catch ( final SQLException e )
+        {
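+            // SQLSTATE class 23 ("23000") is the standard code for an integrity
+            // constraint violation, here a duplicate primary key: the row already exists.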
+            if ("23000".equals(e.getSQLState()))
+            {
+                exists = true;
+            }
+            else
+            {
+                log.error( "Could not insert element", e );
+            }
+
+            // see if it exists, if we didn't already
+            if ( !exists && !this.getJdbcDiskCacheAttributes().isTestBeforeInsert() )
+            {
+                exists = doesElementExist( ce, con );
+            }
+        }
+
+        return exists;
+    }
+
+    /**
+     * This updates a row in the database.
+     * <p>
+     * @param ce
+     * @param con
+     * @param element
+     */
+    private void updateRow( final ICacheElement<K, V> ce, final Connection con, final byte[] element )
+    {
+        final String sqlU = String.format("update %s"
+                + " set ELEMENT  = ?, CREATE_TIME = ?, UPDATE_TIME_SECONDS = ?, " + " SYSTEM_EXPIRE_TIME_SECONDS = ? "
+                + " where CACHE_KEY = ? and REGION = ?", getJdbcDiskCacheAttributes().getTableName());
+
+        try (PreparedStatement psUpdate = con.prepareStatement( sqlU ))
+        {
+            psUpdate.setBytes( 1, element );
+
+            final Timestamp createTime = new Timestamp( ce.getElementAttributes().getCreateTime() );
+            psUpdate.setTimestamp( 2, createTime );
+
+            final long now = System.currentTimeMillis() / 1000;
+            psUpdate.setLong( 3, now );
+
+            final long expireTime = now + ce.getElementAttributes().getMaxLife();
+            psUpdate.setLong( 4, expireTime );
+
+            psUpdate.setString( 5, (String) ce.getKey() );
+            psUpdate.setString( 6, this.getCacheName() );
+            psUpdate.execute();
+
+            log.debug( "ran update {0}", sqlU );
+        }
+        catch ( final SQLException e )
+        {
+            log.error( "Error executing update sql [{0}]", sqlU, e );
+        }
+    }
+
+    /**
+     * Does an element exist for this key?
+     * <p>
+     * @param ce the cache element
+     * @param con a database connection
+     * @return boolean
+     */
+    protected boolean doesElementExist( final ICacheElement<K, V> ce, final Connection con )
+    {
+        boolean exists = false;
+        // don't select the element, since we want this to be fast.
+        final String sqlS = String.format("select CACHE_KEY from %s where REGION = ? and CACHE_KEY = ?",
+                getJdbcDiskCacheAttributes().getTableName());
+
+        try (PreparedStatement psSelect = con.prepareStatement( sqlS ))
+        {
+            psSelect.setString( 1, this.getCacheName() );
+            psSelect.setString( 2, (String) ce.getKey() );
+
+            try (ResultSet rs = psSelect.executeQuery())
+            {
+                exists = rs.next();
+            }
+
+            log.debug( "[{0}] existing status is {1}", ce.getKey(), exists );
+        }
+        catch ( final SQLException e )
+        {
+            log.error( "Problem looking for item before insert.", e );
+        }
+
+        return exists;
+    }
+
+    /**
+     * Queries the database for the value. If it gets a result, the value is deserialized.
+     * <p>
+     * @param key
+     * @return ICacheElement
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#get(Object)
+     */
+    @Override
+    protected ICacheElement<K, V> processGet( final K key )
+    {
+    	getCount.incrementAndGet();
+
+        log.debug( "Getting [{0}] from disk", key );
+
+        if ( !isAlive() )
+        {
+            return null;
+        }
+
+        ICacheElement<K, V> obj = null;
+
+        // region, key
+        final String selectString = String.format("select ELEMENT from %s where REGION = ? and CACHE_KEY = ?",
+                getJdbcDiskCacheAttributes().getTableName());
+
+        try (Connection con = getDataSource().getConnection())
+        {
+            try (PreparedStatement psSelect = con.prepareStatement( selectString ))
+            {
+                psSelect.setString( 1, this.getCacheName() );
+                psSelect.setString( 2, key.toString() );
+
+                try (ResultSet rs = psSelect.executeQuery())
+                {
+                    byte[] data = null;
+
+                    if ( rs.next() )
+                    {
+                        data = rs.getBytes( 1 );
+                    }
+
+                    if ( data != null )
+                    {
+                        try
+                        {
+                            // USE THE SERIALIZER
+                            obj = getElementSerializer().deSerialize( data, null );
+                        }
+                        catch ( final IOException | ClassNotFoundException e )
+                        {
+                            log.error( "Problem getting item for key [{0}]", key, e );
+                        }
+                    }
+                }
+            }
+        }
+        catch ( final SQLException sqle )
+        {
+            log.error( "Caught a SQL exception trying to get the item for key [{0}]",
+                    key, sqle );
+        }
+
+        return obj;
+    }
+
+    /**
+     * This will run a LIKE query. It tries to construct a usable query, but different database
+     * implementations may need the syntax adjusted.
+     * <p>
+     * @param pattern
+     * @return key,value map
+     */
+    @Override
+    protected Map<K, ICacheElement<K, V>> processGetMatching( final String pattern )
+    {
+    	getMatchingCount.incrementAndGet();
+
+        log.debug( "Getting [{0}] from disk", pattern);
+
+        if ( !isAlive() )
+        {
+            return null;
+        }
+
+        final Map<K, ICacheElement<K, V>> results = new HashMap<>();
+
+        // region, key
+        final String selectString = String.format("select ELEMENT from %s where REGION = ? and CACHE_KEY like ?",
+                getJdbcDiskCacheAttributes().getTableName());
+
+        try (Connection con = getDataSource().getConnection())
+        {
+            try (PreparedStatement psSelect = con.prepareStatement( selectString ))
+            {
+                psSelect.setString( 1, this.getCacheName() );
+                psSelect.setString( 2, constructLikeParameterFromPattern( pattern ) );
+
+                try (ResultSet rs = psSelect.executeQuery())
+                {
+                    while ( rs.next() )
+                    {
+                        final byte[] data = rs.getBytes(1);
+                        if ( data != null )
+                        {
+                            try
+                            {
+                                // USE THE SERIALIZER
+                                final ICacheElement<K, V> value = getElementSerializer().deSerialize( data, null );
+                                results.put( value.getKey(), value );
+                            }
+                            catch ( final IOException | ClassNotFoundException e )
+                            {
+                                log.error( "Problem getting items for pattern [{0}]", pattern, e );
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        catch ( final SQLException sqle )
+        {
+            log.error( "Caught a SQL exception trying to get items for pattern [{0}]",
+                    pattern, sqle );
+        }
+
+        return results;
+    }
+
+    /**
+     * @param pattern
+     * @return String to use in the like query.
+     */
+    public String constructLikeParameterFromPattern( final String pattern )
+    {
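+        // Translate the cache's regex-style pattern into SQL LIKE syntax:
+        // ".+" becomes the multi-character wildcard "%" and any remaining "."
+        // becomes the single-character wildcard "_", e.g. "abc.+" -> "abc%".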
+        String likePattern = pattern.replaceAll( "\\.\\+", "%" );
+        likePattern = likePattern.replaceAll( "\\.", "_" );
+
+        log.debug( "pattern = [{0}]", likePattern );
+
+        return likePattern;
+    }
+
+    /**
+     * Returns true if the removal was successful, or false if there is nothing to remove. The
+     * current implementation always returns false and results in a disk orphan.
+     * <p>
+     * @param key
+     * @return boolean
+     */
+    @Override
+    protected boolean processRemove( final K key )
+    {
+        // remove single item.
+        final String sqlSingle = String.format("delete from %s where REGION = ? and CACHE_KEY = ?",
+                getJdbcDiskCacheAttributes().getTableName());
+        // remove all keys of the same name group.
+        final String sqlPartial = String.format("delete from %s where REGION = ? and CACHE_KEY like ?",
+                getJdbcDiskCacheAttributes().getTableName());
+
+        try (Connection con = getDataSource().getConnection())
+        {
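+            // A key that ends with the name component delimiter is treated as a
+            // hierarchical remove: every key with that prefix is deleted via LIKE.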
+            boolean partial = key.toString().endsWith(ICache.NAME_COMPONENT_DELIMITER);
+            String sql = partial ? sqlPartial : sqlSingle;
+
+            try (PreparedStatement psSelect = con.prepareStatement(sql))
+            {
+                psSelect.setString( 1, this.getCacheName() );
+                if ( partial )
+                {
+                    psSelect.setString( 2, key.toString() + "%" );
+                }
+                else
+                {
+                    psSelect.setString( 2, key.toString() );
+                }
+
+                psSelect.executeUpdate();
+
+                setAlive(true);
+            }
+            catch ( final SQLException e )
+            {
+                log.error( "Problem creating statement. sql [{0}]", sql, e );
+                setAlive(false);
+            }
+        }
+        catch ( final SQLException e )
+        {
+            log.error( "Problem updating cache.", e );
+            reset();
+        }
+        return false;
+    }
+
+    /**
+     * This should remove all elements. The auxiliary can be configured to forbid this behavior. If
+     * remove all is not allowed, the method balks.
+     */
+    @Override
+    protected void processRemoveAll()
+    {
+        // it should never get here from the abstract disk cache.
+        if ( this.jdbcDiskCacheAttributes.isAllowRemoveAll() )
+        {
+            final String sql = String.format("delete from %s where REGION = ?",
+                    getJdbcDiskCacheAttributes().getTableName());
+
+            try (Connection con = getDataSource().getConnection())
+            {
+                try (PreparedStatement psDelete = con.prepareStatement( sql ))
+                {
+                    psDelete.setString( 1, this.getCacheName() );
+                    setAlive(true);
+                    psDelete.executeUpdate();
+                }
+                catch ( final SQLException e )
+                {
+                    log.error( "Problem creating statement.", e );
+                    setAlive(false);
+                }
+            }
+            catch ( final SQLException e )
+            {
+                log.error( "Problem removing all.", e );
+                reset();
+            }
+        }
+        else
+        {
+            log.info( "RemoveAll was requested but the request was not fulfilled: "
+                    + "allowRemoveAll is set to false." );
+        }
+    }
+
+    /**
+     * Removes expired elements: non-eternal rows whose SYSTEM_EXPIRE_TIME_SECONDS has passed.
+     * <p>
+     * @return the number deleted
+     */
+    protected int deleteExpired()
+    {
+        int deleted = 0;
+
+        try (Connection con = getDataSource().getConnection())
+        {
+            // The shrinker thread might kick in before the table is created
+            // So check if the table exists first
+            final DatabaseMetaData dmd = con.getMetaData();
+            final ResultSet result = dmd.getTables(null, null,
+                    getJdbcDiskCacheAttributes().getTableName(), null);
+
+            if (result.next())
+            {
+                getTableState().setState( TableState.DELETE_RUNNING );
+                final long now = System.currentTimeMillis() / 1000;
+
+                final String sql = String.format("delete from %s where IS_ETERNAL = ? and REGION = ?"
+                        + " and ? > SYSTEM_EXPIRE_TIME_SECONDS", getJdbcDiskCacheAttributes().getTableName());
+
+                try (PreparedStatement psDelete = con.prepareStatement( sql ))
+                {
+                    psDelete.setString( 1, "F" );
+                    psDelete.setString( 2, this.getCacheName() );
+                    psDelete.setLong( 3, now );
+
+                    setAlive(true);
+
+                    deleted = psDelete.executeUpdate();
+                }
+                catch ( final SQLException e )
+                {
+                    log.error( "Problem creating statement.", e );
+                    setAlive(false);
+                }
+
+                logApplicationEvent( getAuxiliaryCacheAttributes().getName(), "deleteExpired",
+                                     "Deleted expired elements.  URL: " + getDiskLocation() );
+            }
+            else
+            {
+                log.warn( "Trying to shrink non-existing table [{0}]",
+                        getJdbcDiskCacheAttributes().getTableName() );
+            }
+        }
+        catch ( final SQLException e )
+        {
+            logError( getAuxiliaryCacheAttributes().getName(), "deleteExpired",
+                    e.getMessage() + " URL: " + getDiskLocation() );
+            log.error( "Problem removing expired elements from the table.", e );
+            reset();
+        }
+        finally
+        {
+            getTableState().setState( TableState.FREE );
+        }
+
+        return deleted;
+    }
+
+    /**
+     * Typically this is used as a last resort to handle errors, to force a content update, or to remove all.
+     */
+    public void reset()
+    {
+        // nothing
+    }
+
+    /** Shuts down the pool */
+    @Override
+    public void processDispose()
+    {
+        final ICacheEvent<K> cacheEvent = createICacheEvent( getCacheName(), null, ICacheEventLogger.DISPOSE_EVENT );
+
+        try
+        {
+        	dsFactory.close();
+        }
+        catch ( final SQLException e )
+        {
+            log.error( "Problem shutting down.", e );
+        }
+        finally
+        {
+            logICacheEvent( cacheEvent );
+        }
+    }
+
+    /**
+     * Returns the current cache size. Just does a count(*) for the region.
+     * <p>
+     * @return The size value
+     */
+    @Override
+    public int getSize()
+    {
+        int size = 0;
+
+        // region
+        final String selectString = String.format("select count(*) from %s where REGION = ?",
+                getJdbcDiskCacheAttributes().getTableName());
+
+        try (Connection con = getDataSource().getConnection())
+        {
+            try (PreparedStatement psSelect = con.prepareStatement( selectString ))
+            {
+                psSelect.setString( 1, this.getCacheName() );
+
+                try (ResultSet rs = psSelect.executeQuery())
+                {
+                    if ( rs.next() )
+                    {
+                        size = rs.getInt( 1 );
+                    }
+                }
+            }
+        }
+        catch ( final SQLException e )
+        {
+            log.error( "Problem getting size.", e );
+        }
+
+        return size;
+    }
+
+    /**
+     * Return the keys in this cache.
+     * <p>
+     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#getKeySet()
+     */
+    @Override
+    public Set<K> getKeySet() throws IOException
+    {
+        throw new UnsupportedOperationException( "Groups not implemented." );
+        // return null;
+    }
+
+    /**
+     * @param jdbcDiskCacheAttributes The jdbcDiskCacheAttributes to set.
+     */
+    protected void setJdbcDiskCacheAttributes( final JDBCDiskCacheAttributes jdbcDiskCacheAttributes )
+    {
+        this.jdbcDiskCacheAttributes = jdbcDiskCacheAttributes;
+    }
+
+    /**
+     * @return Returns the jdbcDiskCacheAttributes.
+     */
+    protected JDBCDiskCacheAttributes getJdbcDiskCacheAttributes()
+    {
+        return jdbcDiskCacheAttributes;
+    }
+
+    /**
+     * @return Returns the AuxiliaryCacheAttributes.
+     */
+    @Override
+    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
+    {
+        return this.getJdbcDiskCacheAttributes();
+    }
+
+    /**
+     * Extends the parent stats.
+     * <p>
+     * @return IStats
+     */
+    @Override
+    public IStats getStatistics()
+    {
+        final IStats stats = super.getStatistics();
+        stats.setTypeName( "JDBC/Abstract Disk Cache" );
+
+        final List<IStatElement<?>> elems = stats.getStatElements();
+
+        elems.add(new StatElement<>( "Update Count", updateCount ) );
+        elems.add(new StatElement<>( "Get Count", getCount ) );
+        elems.add(new StatElement<>( "Get Matching Count", getMatchingCount ) );
+        elems.add(new StatElement<>( "DB URL", getJdbcDiskCacheAttributes().getUrl()) );
+
+        stats.setStatElements( elems );
+
+        return stats;
+    }
+
+    /**
+     * Returns the name of the table.
+     * <p>
+     * @return the table name or UNDEFINED
+     */
+    protected String getTableName()
+    {
+        String name = "UNDEFINED";
+        if ( this.getJdbcDiskCacheAttributes() != null )
+        {
+            name = this.getJdbcDiskCacheAttributes().getTableName();
+        }
+        return name;
+    }
+
+    /**
+     * @param tableState The tableState to set.
+     */
+    public void setTableState( final TableState tableState )
+    {
+        this.tableState = tableState;
+    }
+
+    /**
+     * @return Returns the tableState.
+     */
+    public TableState getTableState()
+    {
+        return tableState;
+    }
+
+    /**
+     * This is used by the event logging.
+     * <p>
+     * @return the location of the disk, either path or ip.
+     */
+    @Override
+    protected String getDiskLocation()
+    {
+        return this.jdbcDiskCacheAttributes.getUrl();
+    }
+
+    /**
+     * Public so managers can access it.
+     * @return the dsFactory
+     * @throws SQLException if getting a data source fails
+     */
+    public DataSource getDataSource() throws SQLException
+    {
+        return dsFactory.getDataSource();
+    }
+
+    /**
+     * For debugging.
+     * <p>
+     * @return this.getStats();
+     */
+    @Override
+    public String toString()
+    {
+        return this.getStats();
+    }
+}
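
For reference, this auxiliary is never called directly; a region configured with it
is used through the normal JCS access API. A minimal sketch, assuming a region named
"testCache1" that has the JDBC auxiliary attached in cache.ccf (the region name and
configuration are placeholders, not part of this commit):

    import org.apache.commons.jcs3.JCS;
    import org.apache.commons.jcs3.access.CacheAccess;

    public class JdbcDiskCacheExample
    {
        public static void main(final String[] args) throws Exception
        {
            // The region and its JDBC auxiliary are assumed to be defined in cache.ccf.
            final CacheAccess<String, String> cache = JCS.getInstance("testCache1");

            // A put flows through the composite cache and eventually reaches
            // processUpdate above, which serializes the element into the table.
            cache.put("key1", "value1");

            // A get falls through to processGet when the item is not in memory.
            System.out.println(cache.get("key1"));
        }
    }
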
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheAttributes.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheAttributes.java
index 1905f9c1..eb015c31 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheAttributes.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheAttributes.java
@@ -1,331 +1,329 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCacheAttributes;
-
-/**
- * The configurator will set these values based on what is in the cache.ccf file.
- * <p>
- * @author Aaron Smuts
- */
-public class JDBCDiskCacheAttributes
-    extends AbstractDiskCacheAttributes
-{
-    /** Don't change */
-    private static final long serialVersionUID = -6535808344813320062L;
-
-    /** default */
-    private static final String DEFAULT_TABLE_NAME = "JCS_STORE";
-
-    /** DB username */
-    private String userName;
-
-    /** DB password */
-    private String password;
-
-    /** URL for the db */
-    private String url;
-
-    /** The name of the database. */
-    private String database = "";
-
-    /** The driver */
-    private String driverClassName;
-
-    /** The JNDI path. */
-    private String jndiPath;
-
-    /** The time between two JNDI lookups */
-    private long jndiTTL;
-
-    /** The table name */
-    private String tableName = DEFAULT_TABLE_NAME;
-
-    /** If false we will insert and if it fails we will update. */
-    private boolean testBeforeInsert = true;
-
-    /** This is the default limit on the maximum number of active connections. */
-    public static final int DEFAULT_MAX_TOTAL = 10;
-
-    /** Max connections allowed */
-    private int maxTotal = DEFAULT_MAX_TOTAL;
-
-    /** This is the default setting for the cleanup routine. */
-    public static final int DEFAULT_SHRINKER_INTERVAL_SECONDS = 300;
-
-    /** How often should we remove expired. */
-    private int shrinkerIntervalSeconds = DEFAULT_SHRINKER_INTERVAL_SECONDS;
-
-    /** Should we remove expired in the background. */
-    private boolean useDiskShrinker = true;
-
-    /** The default Pool Name to which the connection pool will be keyed. */
-    public static final String DEFAULT_POOL_NAME = "jcs";
-
-    /**
-     * If a pool name is supplied, the manager will attempt to load it. It should be configured in a
-     * separate section as follows. Assuming the name is "MyPool":
-     *
-     * <pre>
-     * jcs.jdbcconnectionpool.MyPool.attributes.userName=MyUserName
-     * jcs.jdbcconnectionpool.MyPool.attributes.password=MyPassword
-     * jcs.jdbcconnectionpool.MyPool.attributes.url=MyUrl
-     * jcs.jdbcconnectionpool.MyPool.attributes.maxActive=MyMaxActive
-     * jcs.jdbcconnectionpool.MyPool.attributes.driverClassName=MyDriverClassName
-     * </pre>
-     */
-    private String connectionPoolName;
-
-    /**
-     * @param userName The userName to set.
-     */
-    public void setUserName( final String userName )
-    {
-        this.userName = userName;
-    }
-
-    /**
-     * @return Returns the userName.
-     */
-    public String getUserName()
-    {
-        return userName;
-    }
-
-    /**
-     * @param password The password to set.
-     */
-    public void setPassword( final String password )
-    {
-        this.password = password;
-    }
-
-    /**
-     * @return Returns the password.
-     */
-    public String getPassword()
-    {
-        return password;
-    }
-
-    /**
-     * @param url The url to set.
-     */
-    public void setUrl( final String url )
-    {
-        this.url = url;
-    }
-
-    /**
-     * @return Returns the url.
-     */
-    public String getUrl()
-    {
-        return url;
-    }
-
-    /**
-     * This is appended to the url.
-     * @param database The database to set.
-     */
-    public void setDatabase( final String database )
-    {
-        this.database = database;
-    }
-
-    /**
-     * @return Returns the database.
-     */
-    public String getDatabase()
-    {
-        return database;
-    }
-
-    /**
-     * @param driverClassName The driverClassName to set.
-     */
-    public void setDriverClassName( final String driverClassName )
-    {
-        this.driverClassName = driverClassName;
-    }
-
-    /**
-     * @return Returns the driverClassName.
-     */
-    public String getDriverClassName()
-    {
-        return driverClassName;
-    }
-
-    /**
-	 * @return the jndiPath
-	 */
-	public String getJndiPath()
-	{
-		return jndiPath;
-	}
-
-	/**
-	 * @param jndiPath the jndiPath to set
-	 */
-	public void setJndiPath(final String jndiPath)
-	{
-		this.jndiPath = jndiPath;
-	}
-
-	/**
-	 * @return the jndiTTL
-	 */
-	public long getJndiTTL()
-	{
-		return jndiTTL;
-	}
-
-	/**
-	 * @param jndiTTL the jndiTTL to set
-	 */
-	public void setJndiTTL(final long jndiTTL)
-	{
-		this.jndiTTL = jndiTTL;
-	}
-
-	/**
-     * @param tableName The tableName to set.
-     */
-    public void setTableName( final String tableName )
-    {
-        this.tableName = tableName;
-    }
-
-    /**
-     * @return Returns the tableName.
-     */
-    public String getTableName()
-    {
-        return tableName;
-    }
-
-    /**
-     * If this is true then the disk cache will check to see if the item already exists in the
-     * database. If it is false, it will try to insert. If the insert fails it will try to update.
-     * <p>
-     * @param testBeforeInsert The testBeforeInsert to set.
-     */
-    public void setTestBeforeInsert( final boolean testBeforeInsert )
-    {
-        this.testBeforeInsert = testBeforeInsert;
-    }
-
-    /**
-     * @return Returns the testBeforeInsert.
-     */
-    public boolean isTestBeforeInsert()
-    {
-        return testBeforeInsert;
-    }
-
-    /**
-     * @param maxActive The maxTotal to set.
-     */
-    public void setMaxTotal( final int maxActive )
-    {
-        this.maxTotal = maxActive;
-    }
-
-    /**
-     * @return Returns the maxTotal.
-     */
-    public int getMaxTotal()
-    {
-        return maxTotal;
-    }
-
-    /**
-     * @param shrinkerIntervalSecondsArg The shrinkerIntervalSeconds to set.
-     */
-    public void setShrinkerIntervalSeconds( final int shrinkerIntervalSecondsArg )
-    {
-        this.shrinkerIntervalSeconds = shrinkerIntervalSecondsArg;
-    }
-
-    /**
-     * @return Returns the shrinkerIntervalSeconds.
-     */
-    public int getShrinkerIntervalSeconds()
-    {
-        return shrinkerIntervalSeconds;
-    }
-
-    /**
-     * @param useDiskShrinker The useDiskShrinker to set.
-     */
-    public void setUseDiskShrinker( final boolean useDiskShrinker )
-    {
-        this.useDiskShrinker = useDiskShrinker;
-    }
-
-    /**
-     * @return Returns the useDiskShrinker.
-     */
-    public boolean isUseDiskShrinker()
-    {
-        return useDiskShrinker;
-    }
-
-    /**
-     * @param connectionPoolName the connectionPoolName to set
-     */
-    public void setConnectionPoolName( final String connectionPoolName )
-    {
-        this.connectionPoolName = connectionPoolName;
-    }
-
-    /**
-     * @return the connectionPoolName
-     */
-    public String getConnectionPoolName()
-    {
-        return connectionPoolName;
-    }
-
-    /**
-     * For debugging.
-     * <p>
-     * @return debug string with most of the properties.
-     */
-    @Override
-    public String toString()
-    {
-        final StringBuilder buf = new StringBuilder();
-        buf.append( "\nJDBCCacheAttributes" );
-        buf.append( "\n UserName [" + getUserName() + "]" );
-        buf.append( "\n Url [" + getUrl() + "]" );
-        buf.append( "\n Database [" + getDatabase() + "]" );
-        buf.append( "\n DriverClassName [" + getDriverClassName() + "]" );
-        buf.append( "\n TableName [" + getTableName() + "]" );
-        buf.append( "\n TestBeforeInsert [" + isTestBeforeInsert() + "]" );
-        buf.append( "\n MaxActive [" + getMaxTotal() + "]" );
-        buf.append( "\n AllowRemoveAll [" + isAllowRemoveAll() + "]" );
-        buf.append( "\n ShrinkerIntervalSeconds [" + getShrinkerIntervalSeconds() + "]" );
-        buf.append( "\n useDiskShrinker [" + isUseDiskShrinker() + "]" );
-        return buf.toString();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCacheAttributes;
+
+/**
+ * The configurator will set these values based on what is in the cache.ccf file.
+ */
+public class JDBCDiskCacheAttributes
+    extends AbstractDiskCacheAttributes
+{
+    /** Don't change */
+    private static final long serialVersionUID = -6535808344813320062L;
+
+    /** default */
+    private static final String DEFAULT_TABLE_NAME = "JCS_STORE";
+
+    /** DB username */
+    private String userName;
+
+    /** DB password */
+    private String password;
+
+    /** URL for the db */
+    private String url;
+
+    /** The name of the database. */
+    private String database = "";
+
+    /** The driver */
+    private String driverClassName;
+
+    /** The JNDI path. */
+    private String jndiPath;
+
+    /** The time between two JNDI lookups */
+    private long jndiTTL;
+
+    /** The table name */
+    private String tableName = DEFAULT_TABLE_NAME;
+
+    /** If false we will insert and if it fails we will update. */
+    private boolean testBeforeInsert = true;
+
+    /** This is the default limit on the maximum number of active connections. */
+    public static final int DEFAULT_MAX_TOTAL = 10;
+
+    /** Max connections allowed */
+    private int maxTotal = DEFAULT_MAX_TOTAL;
+
+    /** This is the default setting for the cleanup routine. */
+    public static final int DEFAULT_SHRINKER_INTERVAL_SECONDS = 300;
+
+    /** How often should we remove expired. */
+    private int shrinkerIntervalSeconds = DEFAULT_SHRINKER_INTERVAL_SECONDS;
+
+    /** Should we remove expired in the background. */
+    private boolean useDiskShrinker = true;
+
+    /** The default Pool Name to which the connection pool will be keyed. */
+    public static final String DEFAULT_POOL_NAME = "jcs";
+
+    /**
+     * If a pool name is supplied, the manager will attempt to load it. It should be configured in a
+     * separate section as follows. Assuming the name is "MyPool":
+     *
+     * <pre>
+     * jcs.jdbcconnectionpool.MyPool.attributes.userName=MyUserName
+     * jcs.jdbcconnectionpool.MyPool.attributes.password=MyPassword
+     * jcs.jdbcconnectionpool.MyPool.attributes.url=MyUrl
+     * jcs.jdbcconnectionpool.MyPool.attributes.maxActive=MyMaxActive
+     * jcs.jdbcconnectionpool.MyPool.attributes.driverClassName=MyDriverClassName
+     * </pre>
+     */
+    private String connectionPoolName;
+
+    /**
+     * @param userName The userName to set.
+     */
+    public void setUserName( final String userName )
+    {
+        this.userName = userName;
+    }
+
+    /**
+     * @return Returns the userName.
+     */
+    public String getUserName()
+    {
+        return userName;
+    }
+
+    /**
+     * @param password The password to set.
+     */
+    public void setPassword( final String password )
+    {
+        this.password = password;
+    }
+
+    /**
+     * @return Returns the password.
+     */
+    public String getPassword()
+    {
+        return password;
+    }
+
+    /**
+     * @param url The url to set.
+     */
+    public void setUrl( final String url )
+    {
+        this.url = url;
+    }
+
+    /**
+     * @return Returns the url.
+     */
+    public String getUrl()
+    {
+        return url;
+    }
+
+    /**
+     * This is appended to the url.
+     * @param database The database to set.
+     */
+    public void setDatabase( final String database )
+    {
+        this.database = database;
+    }
+
+    /**
+     * @return Returns the database.
+     */
+    public String getDatabase()
+    {
+        return database;
+    }
+
+    /**
+     * @param driverClassName The driverClassName to set.
+     */
+    public void setDriverClassName( final String driverClassName )
+    {
+        this.driverClassName = driverClassName;
+    }
+
+    /**
+     * @return Returns the driverClassName.
+     */
+    public String getDriverClassName()
+    {
+        return driverClassName;
+    }
+
+    /**
+	 * @return the jndiPath
+	 */
+	public String getJndiPath()
+	{
+		return jndiPath;
+	}
+
+	/**
+	 * @param jndiPath the jndiPath to set
+	 */
+	public void setJndiPath(final String jndiPath)
+	{
+		this.jndiPath = jndiPath;
+	}
+
+	/**
+	 * @return the jndiTTL
+	 */
+	public long getJndiTTL()
+	{
+		return jndiTTL;
+	}
+
+	/**
+	 * @param jndiTTL the jndiTTL to set
+	 */
+	public void setJndiTTL(final long jndiTTL)
+	{
+		this.jndiTTL = jndiTTL;
+	}
+
+	/**
+     * @param tableName The tableName to set.
+     */
+    public void setTableName( final String tableName )
+    {
+        this.tableName = tableName;
+    }
+
+    /**
+     * @return Returns the tableName.
+     */
+    public String getTableName()
+    {
+        return tableName;
+    }
+
+    /**
+     * If this is true then the disk cache will check to see if the item already exists in the
+     * database. If it is false, it will try to insert. If the insert fails it will try to update.
+     * <p>
+     * @param testBeforeInsert The testBeforeInsert to set.
+     */
+    public void setTestBeforeInsert( final boolean testBeforeInsert )
+    {
+        this.testBeforeInsert = testBeforeInsert;
+    }
+
+    /**
+     * @return Returns the testBeforeInsert.
+     */
+    public boolean isTestBeforeInsert()
+    {
+        return testBeforeInsert;
+    }
+
+    /**
+     * @param maxActive The maxTotal to set.
+     */
+    public void setMaxTotal( final int maxActive )
+    {
+        this.maxTotal = maxActive;
+    }
+
+    /**
+     * @return Returns the maxTotal.
+     */
+    public int getMaxTotal()
+    {
+        return maxTotal;
+    }
+
+    /**
+     * @param shrinkerIntervalSecondsArg The shrinkerIntervalSeconds to set.
+     */
+    public void setShrinkerIntervalSeconds( final int shrinkerIntervalSecondsArg )
+    {
+        this.shrinkerIntervalSeconds = shrinkerIntervalSecondsArg;
+    }
+
+    /**
+     * @return Returns the shrinkerIntervalSeconds.
+     */
+    public int getShrinkerIntervalSeconds()
+    {
+        return shrinkerIntervalSeconds;
+    }
+
+    /**
+     * @param useDiskShrinker The useDiskShrinker to set.
+     */
+    public void setUseDiskShrinker( final boolean useDiskShrinker )
+    {
+        this.useDiskShrinker = useDiskShrinker;
+    }
+
+    /**
+     * @return Returns the useDiskShrinker.
+     */
+    public boolean isUseDiskShrinker()
+    {
+        return useDiskShrinker;
+    }
+
+    /**
+     * @param connectionPoolName the connectionPoolName to set
+     */
+    public void setConnectionPoolName( final String connectionPoolName )
+    {
+        this.connectionPoolName = connectionPoolName;
+    }
+
+    /**
+     * @return the connectionPoolName
+     */
+    public String getConnectionPoolName()
+    {
+        return connectionPoolName;
+    }
+
+    /**
+     * For debugging.
+     * <p>
+     * @return debug string with most of the properties.
+     */
+    @Override
+    public String toString()
+    {
+        final StringBuilder buf = new StringBuilder();
+        buf.append( "\nJDBCCacheAttributes" );
+        buf.append( "\n UserName [" + getUserName() + "]" );
+        buf.append( "\n Url [" + getUrl() + "]" );
+        buf.append( "\n Database [" + getDatabase() + "]" );
+        buf.append( "\n DriverClassName [" + getDriverClassName() + "]" );
+        buf.append( "\n TableName [" + getTableName() + "]" );
+        buf.append( "\n TestBeforeInsert [" + isTestBeforeInsert() + "]" );
+        buf.append( "\n MaxActive [" + getMaxTotal() + "]" );
+        buf.append( "\n AllowRemoveAll [" + isAllowRemoveAll() + "]" );
+        buf.append( "\n ShrinkerIntervalSeconds [" + getShrinkerIntervalSeconds() + "]" );
+        buf.append( "\n useDiskShrinker [" + isUseDiskShrinker() + "]" );
+        return buf.toString();
+    }
+}
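
The setters above are driven by PropertySetter from cache.ccf. A minimal sketch of a
region wired to the JDBC auxiliary, assuming a MySQL driver on the classpath (names,
URL, and credentials are illustrative placeholders, not values from this commit):

    jcs.auxiliary.JDBC=org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheFactory
    jcs.auxiliary.JDBC.attributes=org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes
    jcs.auxiliary.JDBC.attributes.userName=myUser
    jcs.auxiliary.JDBC.attributes.password=myPassword
    jcs.auxiliary.JDBC.attributes.url=jdbc:mysql://localhost:3306/jcs
    jcs.auxiliary.JDBC.attributes.driverClassName=com.mysql.cj.jdbc.Driver
    jcs.auxiliary.JDBC.attributes.tableName=JCS_STORE
    jcs.auxiliary.JDBC.attributes.testBeforeInsert=false
    jcs.auxiliary.JDBC.attributes.maxTotal=15
    jcs.auxiliary.JDBC.attributes.allowRemoveAll=true
    jcs.auxiliary.JDBC.attributes.useDiskShrinker=true
    jcs.auxiliary.JDBC.attributes.shrinkerIntervalSeconds=300

    jcs.region.testCache1=JDBC
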
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheFactory.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheFactory.java
index 42d76266..5545cf51 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheFactory.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/JDBCDiskCacheFactory.java
@@ -1,264 +1,262 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.sql.SQLException;
-import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.jcs3.auxiliary.AbstractAuxiliaryCacheFactory;
-import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.JndiDataSourceFactory;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.SharedPoolDataSourceFactory;
-import org.apache.commons.jcs3.engine.behavior.ICompositeCacheManager;
-import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
-import org.apache.commons.jcs3.engine.behavior.IRequireScheduler;
-import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-import org.apache.commons.jcs3.utils.config.PropertySetter;
-
-/**
- * This factory should create JDBC auxiliary caches.
- * <p>
- * @author Aaron Smuts
- */
-public class JDBCDiskCacheFactory
-    extends AbstractAuxiliaryCacheFactory
-    implements IRequireScheduler
-{
-    /** The logger */
-    private static final Log log = LogManager.getLog( JDBCDiskCacheFactory.class );
-
-    /**
-     * A map of table names to TableState objects. Each cache has a table state object, which is
-     * used to determine if any long processes such as deletes or optimizations are running.
-     */
-    private ConcurrentMap<String, TableState> tableStates;
-
-    /** The background scheduler, one for all regions. Injected by the configurator */
-    protected ScheduledExecutorService scheduler;
-
-    /**
-     * A map of table name to shrinker threads. This allows each table to have a different setting.
-     * It assumes that there is only one jdbc disk cache auxiliary defined per table.
-     */
-    private ConcurrentMap<String, ShrinkerThread> shrinkerThreadMap;
-
-    /** Pool name to DataSourceFactories */
-    private ConcurrentMap<String, DataSourceFactory> dsFactories;
-
-    /** props prefix */
-    protected static final String POOL_CONFIGURATION_PREFIX = "jcs.jdbcconnectionpool.";
-
-    /** .attributes */
-    protected static final String ATTRIBUTE_PREFIX = ".attributes";
-
-    /**
-     * This factory method should create an instance of the jdbc cache.
-     * <p>
-     * @param rawAttr specific cache configuration attributes
-     * @param compositeCacheManager the global cache manager
-     * @param cacheEventLogger a specific logger for cache events
-     * @param elementSerializer a serializer for cache elements
-     * @return JDBCDiskCache the cache instance
-     * @throws SQLException if the cache instance could not be created
-     */
-    @Override
-    public <K, V> JDBCDiskCache<K, V> createCache( final AuxiliaryCacheAttributes rawAttr,
-            final ICompositeCacheManager compositeCacheManager,
-            final ICacheEventLogger cacheEventLogger, final IElementSerializer elementSerializer )
-            throws SQLException
-    {
-        final JDBCDiskCacheAttributes cattr = (JDBCDiskCacheAttributes) rawAttr;
-        final TableState tableState = getTableState( cattr.getTableName() );
-        final DataSourceFactory dsFactory = getDataSourceFactory(cattr, compositeCacheManager.getConfigurationProperties());
-
-        final JDBCDiskCache<K, V> cache = new JDBCDiskCache<>(cattr, dsFactory, tableState);
-        cache.setCacheEventLogger( cacheEventLogger );
-        cache.setElementSerializer( elementSerializer );
-
-        // create a shrinker if we need it.
-        createShrinkerWhenNeeded( cattr, cache );
-
-        return cache;
-    }
-
-    /**
-     * Initialize this factory
-     */
-    @Override
-    public void initialize()
-    {
-        super.initialize();
-        this.tableStates = new ConcurrentHashMap<>();
-        this.shrinkerThreadMap = new ConcurrentHashMap<>();
-        this.dsFactories = new ConcurrentHashMap<>();
-    }
-
-    /**
-     * Dispose of this factory, clean up shared resources
-     */
-    @Override
-    public void dispose()
-    {
-        this.tableStates.clear();
-
-        for (final DataSourceFactory dsFactory : this.dsFactories.values())
-        {
-        	try
-        	{
-				dsFactory.close();
-			}
-        	catch (final SQLException e)
-        	{
-        		log.error("Could not close data source factory {0}", dsFactory.getName(), e);
-			}
-        }
-
-        this.dsFactories.clear();
-        this.shrinkerThreadMap.clear();
-        super.dispose();
-    }
-
-    /**
-     * Get a table state for a given table name
-     *
-     * @param tableName
-     * @return a cached instance of the table state
-     */
-    protected TableState getTableState(final String tableName)
-    {
-        return tableStates.computeIfAbsent(tableName, TableState::new);
-    }
-
-    /**
-	 * @see org.apache.commons.jcs3.engine.behavior.IRequireScheduler#setScheduledExecutorService(java.util.concurrent.ScheduledExecutorService)
-	 */
-	@Override
-	public void setScheduledExecutorService(final ScheduledExecutorService scheduledExecutor)
-	{
-		this.scheduler = scheduledExecutor;
-	}
-
-	/**
-     * Get the scheduler service
-     *
-     * @return the scheduler
-     */
-    protected ScheduledExecutorService getScheduledExecutorService()
-    {
-        return scheduler;
-    }
-
-    /**
-     * If UseDiskShrinker is true then we will create a shrinker daemon if necessary.
-     * <p>
-     * @param cattr
-     * @param raf
-     */
-    protected void createShrinkerWhenNeeded( final JDBCDiskCacheAttributes cattr, final JDBCDiskCache<?, ?> raf )
-    {
-        // add cache to shrinker.
-        if ( cattr.isUseDiskShrinker() )
-        {
-            final ScheduledExecutorService shrinkerService = getScheduledExecutorService();
-            final ShrinkerThread shrinkerThread = shrinkerThreadMap.computeIfAbsent(cattr.getTableName(), key -> {
-                final ShrinkerThread newShrinkerThread = new ShrinkerThread();
-
-                final long intervalMillis = Math.max( 999, cattr.getShrinkerIntervalSeconds() * 1000 );
-                log.info( "Setting the shrinker to run every [{0}] ms. for table [{1}]",
-                        intervalMillis, key );
-                shrinkerService.scheduleAtFixedRate(newShrinkerThread, 0, intervalMillis, TimeUnit.MILLISECONDS);
-
-                return newShrinkerThread;
-            });
-
-            shrinkerThread.addDiskCacheToShrinkList( raf );
-        }
-    }
-
-    /**
-     * manages the DataSourceFactories.
-     * <p>
-     * @param cattr the cache configuration
-     * @param configProps the configuration properties object
-     * @return a DataSourceFactory
-     * @throws SQLException if a database access error occurs
-     */
-    protected DataSourceFactory getDataSourceFactory( final JDBCDiskCacheAttributes cattr,
-                                                      final Properties configProps ) throws SQLException
-    {
-    	String poolName = null;
-
-    	if (cattr.getConnectionPoolName() == null)
-    	{
-    		poolName = cattr.getCacheName() + "." + JDBCDiskCacheAttributes.DEFAULT_POOL_NAME;
-        }
-        else
-        {
-            poolName = cattr.getConnectionPoolName();
-        }
-
-
-    	return this.dsFactories.computeIfAbsent(poolName, key -> {
-    	    final DataSourceFactory newDsFactory;
-            JDBCDiskCacheAttributes dsConfig;
-
-            if (cattr.getConnectionPoolName() == null)
-            {
-                dsConfig = cattr;
-            }
-            else
-            {
-                dsConfig = new JDBCDiskCacheAttributes();
-                final String dsConfigAttributePrefix = POOL_CONFIGURATION_PREFIX + key + ATTRIBUTE_PREFIX;
-                PropertySetter.setProperties( dsConfig,
-                        configProps,
-                        dsConfigAttributePrefix + "." );
-
-                dsConfig.setConnectionPoolName(key);
-            }
-
-            if ( dsConfig.getJndiPath() != null )
-            {
-                newDsFactory = new JndiDataSourceFactory();
-            }
-            else
-            {
-                newDsFactory = new SharedPoolDataSourceFactory();
-            }
-
-            try
-            {
-                newDsFactory.initialize(dsConfig);
-            }
-            catch (final SQLException e)
-            {
-                throw new RuntimeException(e);
-            }
-    	    return newDsFactory;
-    	});
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.SQLException;
+import java.util.Properties;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.jcs3.auxiliary.AbstractAuxiliaryCacheFactory;
+import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.JndiDataSourceFactory;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.SharedPoolDataSourceFactory;
+import org.apache.commons.jcs3.engine.behavior.ICompositeCacheManager;
+import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
+import org.apache.commons.jcs3.engine.behavior.IRequireScheduler;
+import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+import org.apache.commons.jcs3.utils.config.PropertySetter;
+
+/**
+ * This factory should create JDBC auxiliary caches.
+ */
+public class JDBCDiskCacheFactory
+    extends AbstractAuxiliaryCacheFactory
+    implements IRequireScheduler
+{
+    /** The logger */
+    private static final Log log = LogManager.getLog( JDBCDiskCacheFactory.class );
+
+    /**
+     * A map of table names to TableState objects. Each table has a state object, which is
+     * used to determine whether any long-running processes, such as deletes or optimizations, are running.
+     */
+    private ConcurrentMap<String, TableState> tableStates;
+
+    /** The background scheduler, one for all regions. Injected by the configurator */
+    protected ScheduledExecutorService scheduler;
+
+    /**
+     * A map of table names to shrinker threads. This allows each table to have a different setting.
+     * It assumes that there is only one JDBC disk cache auxiliary defined per table.
+     */
+    private ConcurrentMap<String, ShrinkerThread> shrinkerThreadMap;
+
+    /** Pool name to DataSourceFactories */
+    private ConcurrentMap<String, DataSourceFactory> dsFactories;
+
+    /** Configuration property prefix for connection pools. */
+    protected static final String POOL_CONFIGURATION_PREFIX = "jcs.jdbcconnectionpool.";
+
+    /** Suffix for pool attribute configuration keys. */
+    protected static final String ATTRIBUTE_PREFIX = ".attributes";
+
+    /**
+     * This factory method should create an instance of the JDBC cache.
+     * <p>
+     * @param rawAttr specific cache configuration attributes
+     * @param compositeCacheManager the global cache manager
+     * @param cacheEventLogger a specific logger for cache events
+     * @param elementSerializer a serializer for cache elements
+     * @return JDBCDiskCache the cache instance
+     * @throws SQLException if the cache instance could not be created
+     */
+    @Override
+    public <K, V> JDBCDiskCache<K, V> createCache( final AuxiliaryCacheAttributes rawAttr,
+            final ICompositeCacheManager compositeCacheManager,
+            final ICacheEventLogger cacheEventLogger, final IElementSerializer elementSerializer )
+            throws SQLException
+    {
+        final JDBCDiskCacheAttributes cattr = (JDBCDiskCacheAttributes) rawAttr;
+        final TableState tableState = getTableState( cattr.getTableName() );
+        final DataSourceFactory dsFactory = getDataSourceFactory(cattr, compositeCacheManager.getConfigurationProperties());
+
+        final JDBCDiskCache<K, V> cache = new JDBCDiskCache<>(cattr, dsFactory, tableState);
+        cache.setCacheEventLogger( cacheEventLogger );
+        cache.setElementSerializer( elementSerializer );
+
+        // create a shrinker if we need it.
+        createShrinkerWhenNeeded( cattr, cache );
+
+        return cache;
+    }
+
+    /**
+     * Initialize this factory
+     */
+    @Override
+    public void initialize()
+    {
+        super.initialize();
+        this.tableStates = new ConcurrentHashMap<>();
+        this.shrinkerThreadMap = new ConcurrentHashMap<>();
+        this.dsFactories = new ConcurrentHashMap<>();
+    }
+
+    /**
+     * Dispose of this factory, clean up shared resources
+     */
+    @Override
+    public void dispose()
+    {
+        this.tableStates.clear();
+
+        for (final DataSourceFactory dsFactory : this.dsFactories.values())
+        {
+        	try
+        	{
+				dsFactory.close();
+			}
+        	catch (final SQLException e)
+        	{
+        		log.error("Could not close data source factory {0}", dsFactory.getName(), e);
+			}
+        }
+
+        this.dsFactories.clear();
+        this.shrinkerThreadMap.clear();
+        super.dispose();
+    }
+
+    /**
+     * Gets the table state for a given table name.
+     *
+     * @param tableName the table name
+     * @return a cached instance of the table state
+     */
+    protected TableState getTableState(final String tableName)
+    {
+        return tableStates.computeIfAbsent(tableName, TableState::new);
+    }
+
+    /**
+	 * @see org.apache.commons.jcs3.engine.behavior.IRequireScheduler#setScheduledExecutorService(java.util.concurrent.ScheduledExecutorService)
+	 */
+	@Override
+	public void setScheduledExecutorService(final ScheduledExecutorService scheduledExecutor)
+	{
+		this.scheduler = scheduledExecutor;
+	}
+
+	/**
+     * Get the scheduler service
+     *
+     * @return the scheduler
+     */
+    protected ScheduledExecutorService getScheduledExecutorService()
+    {
+        return scheduler;
+    }
+
+    /**
+     * If UseDiskShrinker is true then we will create a shrinker daemon if necessary.
+     * <p>
+     * @param cattr the cache configuration
+     * @param raf the cache to add to the shrink list
+     */
+    protected void createShrinkerWhenNeeded( final JDBCDiskCacheAttributes cattr, final JDBCDiskCache<?, ?> raf )
+    {
+        // add cache to shrinker.
+        if ( cattr.isUseDiskShrinker() )
+        {
+            final ScheduledExecutorService shrinkerService = getScheduledExecutorService();
+            final ShrinkerThread shrinkerThread = shrinkerThreadMap.computeIfAbsent(cattr.getTableName(), key -> {
+                final ShrinkerThread newShrinkerThread = new ShrinkerThread();
+
+                final long intervalMillis = Math.max( 999, cattr.getShrinkerIntervalSeconds() * 1000 );
+                log.info( "Setting the shrinker to run every [{0}] ms. for table [{1}]",
+                        intervalMillis, key );
+                shrinkerService.scheduleAtFixedRate(newShrinkerThread, 0, intervalMillis, TimeUnit.MILLISECONDS);
+
+                return newShrinkerThread;
+            });
+
+            shrinkerThread.addDiskCacheToShrinkList( raf );
+        }
+    }
+
+    /**
+     * Manages the DataSourceFactories.
+     * <p>
+     * @param cattr the cache configuration
+     * @param configProps the configuration properties object
+     * @return a DataSourceFactory
+     * @throws SQLException if a database access error occurs
+     */
+    protected DataSourceFactory getDataSourceFactory( final JDBCDiskCacheAttributes cattr,
+                                                      final Properties configProps ) throws SQLException
+    {
+    	String poolName = null;
+
+    	if (cattr.getConnectionPoolName() == null)
+    	{
+    		poolName = cattr.getCacheName() + "." + JDBCDiskCacheAttributes.DEFAULT_POOL_NAME;
+        }
+        else
+        {
+            poolName = cattr.getConnectionPoolName();
+        }
+
+
+    	return this.dsFactories.computeIfAbsent(poolName, key -> {
+    	    final DataSourceFactory newDsFactory;
+            JDBCDiskCacheAttributes dsConfig;
+
+            if (cattr.getConnectionPoolName() == null)
+            {
+                dsConfig = cattr;
+            }
+            else
+            {
+                dsConfig = new JDBCDiskCacheAttributes();
+                final String dsConfigAttributePrefix = POOL_CONFIGURATION_PREFIX + key + ATTRIBUTE_PREFIX;
+                PropertySetter.setProperties( dsConfig,
+                        configProps,
+                        dsConfigAttributePrefix + "." );
+
+                dsConfig.setConnectionPoolName(key);
+            }
+
+            if ( dsConfig.getJndiPath() != null )
+            {
+                newDsFactory = new JndiDataSourceFactory();
+            }
+            else
+            {
+                newDsFactory = new SharedPoolDataSourceFactory();
+            }
+
+            try
+            {
+                newDsFactory.initialize(dsConfig);
+            }
+            catch (final SQLException e)
+            {
+                throw new RuntimeException(e);
+            }
+    	    return newDsFactory;
+    	});
+    }
+}
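
For context on how this factory is wired in, here is a minimal sketch of configuring a
region to use JDBCDiskCacheFactory through properties. The region name, pool name, table
name, and connection URL are illustrative assumptions; the "jcs.jdbcconnectionpool."
prefix matches the POOL_CONFIGURATION_PREFIX constant above, and JCS and CacheAccess are
the standard commons-jcs3 entry points.

    import java.util.Properties;

    import org.apache.commons.jcs3.JCS;
    import org.apache.commons.jcs3.access.CacheAccess;

    public class JdbcRegionSketch
    {
        public static void main(final String[] args) throws Exception
        {
            final Properties props = new Properties();
            // Attach a JDBC auxiliary (named "JDBC" here) to the default region configuration.
            props.put("jcs.default", "JDBC");
            props.put("jcs.default.cacheattributes",
                "org.apache.commons.jcs3.engine.CompositeCacheAttributes");
            props.put("jcs.auxiliary.JDBC",
                "org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheFactory");
            props.put("jcs.auxiliary.JDBC.attributes",
                "org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes");
            // Attribute keys assumed from the JDBCDiskCacheAttributes bean properties.
            props.put("jcs.auxiliary.JDBC.attributes.TableName", "JCS_STORE");
            props.put("jcs.auxiliary.JDBC.attributes.ConnectionPoolName", "MyPool");
            // Pool settings are read under POOL_CONFIGURATION_PREFIX + name + ATTRIBUTE_PREFIX.
            props.put("jcs.jdbcconnectionpool.MyPool.attributes.Url", "jdbc:hsqldb:mem:cache");

            JCS.setConfigProperties(props);
            final CacheAccess<String, String> cache = JCS.getInstance("testRegion");
            cache.put("key", "value");
        }
    }
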
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/ShrinkerThread.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/ShrinkerThread.java
index cf687895..6068433e 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/ShrinkerThread.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/ShrinkerThread.java
@@ -1,148 +1,146 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.util.Iterator;
-import java.util.concurrent.CopyOnWriteArraySet;
-
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-import org.apache.commons.jcs3.utils.timing.ElapsedTimer;
-
-/**
- * Calls delete expired on the disk caches. The shrinker is run by a clock daemon. The shrinker
- * calls delete on each region. It pauses between calls.
- * <p>
- * @author Aaron Smuts
- */
-public class ShrinkerThread
-    implements Runnable
-{
-    /** The logger. */
-    private static final Log log = LogManager.getLog( ShrinkerThread.class );
-
-    /** A set of JDBCDiskCache objects to call deleteExpired on. */
-    private final CopyOnWriteArraySet<JDBCDiskCache<?, ?>> shrinkSet =
-            new CopyOnWriteArraySet<>();
-
-    /** Default time period to use. */
-    private static final long DEFAULT_PAUSE_BETWEEN_REGION_CALLS_MILLIS = 5000;
-
-    /**
-     * How long should we wait between calls to deleteExpired when we are iterating through the list
-     * of regions. Delete can lock the table. We want to give clients a chance to get some work
-     * done.
-     */
-    private long pauseBetweenRegionCallsMillis = DEFAULT_PAUSE_BETWEEN_REGION_CALLS_MILLIS;
-
-    /**
-     * Does nothing special.
-     */
-    protected ShrinkerThread()
-    {
-    }
-
-    /**
-     * Adds a JDBC disk cache to the set of disk caches to shrink.
-     * <p>
-     * @param diskCache
-     */
-    public void addDiskCacheToShrinkList( final JDBCDiskCache<?, ?> diskCache )
-    {
-        // The set will prevent dupes.
-        // We could also just add these to a hashmap by region name,
-        // but that might cause a problem if you wanted to use two different
-        // JDBC disk caches for the same region.
-        shrinkSet.add( diskCache );
-    }
-
-    /**
-     * Calls deleteExpired on each item in the set. It pauses between each call.
-     */
-    @Override
-    public void run()
-    {
-        try
-        {
-            deleteExpiredFromAllRegisteredRegions();
-        }
-        catch ( final Throwable e )
-        {
-            log.error( "Caught an exception while trying to delete expired items.", e );
-        }
-    }
-
-    /**
-     * Deletes the expired items from all the registered regions.
-     */
-    private void deleteExpiredFromAllRegisteredRegions()
-    {
-        log.info( "Running JDBC disk cache shrinker. Number of regions [{0}]",
-                shrinkSet::size);
-
-        for (final Iterator<JDBCDiskCache<?, ?>> i = shrinkSet.iterator(); i.hasNext();)
-        {
-            final JDBCDiskCache<?, ?> cache = i.next();
-            final ElapsedTimer timer = new ElapsedTimer();
-            final int deleted = cache.deleteExpired();
-
-            log.info( "Deleted [{0}] expired for region [{1}] for table [{2}] in {3} ms.",
-                    deleted, cache.getCacheName(), cache.getTableName(), timer.getElapsedTime() );
-
-            // don't pause after the last call to delete expired.
-            if ( i.hasNext() )
-            {
-                log.info( "Pausing for [{0}] ms before shrinking the next region.",
-                        this.getPauseBetweenRegionCallsMillis() );
-
-                try
-                {
-                    Thread.sleep( this.getPauseBetweenRegionCallsMillis() );
-                }
-                catch ( final InterruptedException e )
-                {
-                    log.warn( "Interrupted while waiting to delete expired for the next region." );
-                }
-            }
-        }
-    }
-
-    /**
-     * How long should we wait between calls to deleteExpired when we are iterating through the list
-     * of regions.
-     * <p>
-     * @param pauseBetweenRegionCallsMillis The pauseBetweenRegionCallsMillis to set.
-     */
-    public void setPauseBetweenRegionCallsMillis( final long pauseBetweenRegionCallsMillis )
-    {
-        this.pauseBetweenRegionCallsMillis = pauseBetweenRegionCallsMillis;
-    }
-
-    /**
-     * How long should we wait between calls to deleteExpired when we are iterating through the list
-     * of regions.
-     * <p>
-     * @return Returns the pauseBetweenRegionCallsMillis.
-     */
-    public long getPauseBetweenRegionCallsMillis()
-    {
-        return pauseBetweenRegionCallsMillis;
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Iterator;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+import org.apache.commons.jcs3.utils.timing.ElapsedTimer;
+
+/**
+ * Calls delete expired on the disk caches. The shrinker is run by a clock daemon. The shrinker
+ * calls delete on each region. It pauses between calls.
+ */
+public class ShrinkerThread
+    implements Runnable
+{
+    /** The logger. */
+    private static final Log log = LogManager.getLog( ShrinkerThread.class );
+
+    /** A set of JDBCDiskCache objects to call deleteExpired on. */
+    private final CopyOnWriteArraySet<JDBCDiskCache<?, ?>> shrinkSet =
+            new CopyOnWriteArraySet<>();
+
+    /** Default time period to use. */
+    private static final long DEFAULT_PAUSE_BETWEEN_REGION_CALLS_MILLIS = 5000;
+
+    /**
+     * How long should we wait between calls to deleteExpired when we are iterating through the list
+     * of regions. Delete can lock the table. We want to give clients a chance to get some work
+     * done.
+     */
+    private long pauseBetweenRegionCallsMillis = DEFAULT_PAUSE_BETWEEN_REGION_CALLS_MILLIS;
+
+    /**
+     * Does nothing special.
+     */
+    protected ShrinkerThread()
+    {
+    }
+
+    /**
+     * Adds a JDBC disk cache to the set of disk caches to shrink.
+     * <p>
+     * @param diskCache
+     */
+    public void addDiskCacheToShrinkList( final JDBCDiskCache<?, ?> diskCache )
+    {
+        // The set will prevent dupes.
+        // We could also just add these to a hashmap by region name,
+        // but that might cause a problem if you wanted to use two different
+        // JDBC disk caches for the same region.
+        shrinkSet.add( diskCache );
+    }
+
+    /**
+     * Calls deleteExpired on each item in the set. It pauses between each call.
+     */
+    @Override
+    public void run()
+    {
+        try
+        {
+            deleteExpiredFromAllRegisteredRegions();
+        }
+        catch ( final Throwable e )
+        {
+            log.error( "Caught an exception while trying to delete expired items.", e );
+        }
+    }
+
+    /**
+     * Deletes the expired items from all the registered regions.
+     */
+    private void deleteExpiredFromAllRegisteredRegions()
+    {
+        log.info( "Running JDBC disk cache shrinker. Number of regions [{0}]",
+                shrinkSet::size);
+
+        for (final Iterator<JDBCDiskCache<?, ?>> i = shrinkSet.iterator(); i.hasNext();)
+        {
+            final JDBCDiskCache<?, ?> cache = i.next();
+            final ElapsedTimer timer = new ElapsedTimer();
+            final int deleted = cache.deleteExpired();
+
+            log.info( "Deleted [{0}] expired for region [{1}] for table [{2}] in {3} ms.",
+                    deleted, cache.getCacheName(), cache.getTableName(), timer.getElapsedTime() );
+
+            // don't pause after the last call to delete expired.
+            if ( i.hasNext() )
+            {
+                log.info( "Pausing for [{0}] ms before shrinking the next region.",
+                        this.getPauseBetweenRegionCallsMillis() );
+
+                try
+                {
+                    Thread.sleep( this.getPauseBetweenRegionCallsMillis() );
+                }
+                catch ( final InterruptedException e )
+                {
+                    log.warn( "Interrupted while waiting to delete expired for the next region." );
+                }
+            }
+        }
+    }
+
+    /**
+     * How long should we wait between calls to deleteExpired when we are iterating through the list
+     * of regions.
+     * <p>
+     * @param pauseBetweenRegionCallsMillis The pauseBetweenRegionCallsMillis to set.
+     */
+    public void setPauseBetweenRegionCallsMillis( final long pauseBetweenRegionCallsMillis )
+    {
+        this.pauseBetweenRegionCallsMillis = pauseBetweenRegionCallsMillis;
+    }
+
+    /**
+     * How long should we wait between calls to deleteExpired when we are iterating through the list
+     * of regions.
+     * <p>
+     * @return Returns the pauseBetweenRegionCallsMillis.
+     */
+    public long getPauseBetweenRegionCallsMillis()
+    {
+        return pauseBetweenRegionCallsMillis;
+    }
+}
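
The shrinker above is scheduled, not run directly. A small runnable sketch of the same
scheduling pattern, assuming an illustrative interval and using a plain Runnable in place
of ShrinkerThread (whose constructor is protected):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ShrinkerScheduleSketch
    {
        public static void main(final String[] args)
        {
            final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
            final long shrinkerIntervalSeconds = 300; // illustrative value
            // Same one-second floor as createShrinkerWhenNeeded in the factory above.
            final long intervalMillis = Math.max(999, shrinkerIntervalSeconds * 1000);
            // Inside the jdbc package this Runnable would be a ShrinkerThread instance.
            final Runnable shrinker = () -> System.out.println("deleteExpired() would run here");
            scheduler.scheduleAtFixedRate(shrinker, 0, intervalMillis, TimeUnit.MILLISECONDS);
        }
    }
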
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/TableState.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/TableState.java
index 4786c662..d169d30f 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/TableState.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/TableState.java
@@ -1,114 +1,112 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.Serializable;
-
-/**
- * This is used by various elements of the JDBC disk cache to indicate the
- * status of a table. The MySQL disk cache, for instance, marks the status as
- * optimizing when a scheduled optimization is taking place. This allows the
- * cache to balk rather than block during long running optimizations.
- * <p>
- * @author Aaron Smuts
- */
-public class TableState
-    implements Serializable
-{
-    /** Don't change. */
-    private static final long serialVersionUID = -6625081552084964885L;
-
-    /** Name of the table whose state this reflects. */
-    private String tableName;
-
-    /**
-     * The table is free. It can be accessed and no potentially table locking
-     * jobs are running.
-     */
-    public static final int FREE = 0;
-
-    /** A potentially table locking deletion is running */
-    public static final int DELETE_RUNNING = 1;
-
-    /** A table locking optimization is running. */
-    public static final int OPTIMIZATION_RUNNING = 2;
-
-    /** The current state. We might want to add an error state. */
-    private int state = FREE;
-
-    /**
-     * Construct a usable table state.
-     * <p>
-     * @param tableName
-     */
-    public TableState( final String tableName )
-    {
-        this.setTableName( tableName );
-    }
-
-    /**
-     * @param tableName
-     *            The tableName to set.
-     */
-    public void setTableName( final String tableName )
-    {
-        this.tableName = tableName;
-    }
-
-    /**
-     * @return Returns the tableName.
-     */
-    public String getTableName()
-    {
-        return tableName;
-    }
-
-    /**
-     * @param state
-     *            The state to set.
-     */
-    public void setState( final int state )
-    {
-        this.state = state;
-    }
-
-    /**
-     * @return Returns the state.
-     */
-    public int getState()
-    {
-        return state;
-    }
-
-    /**
-     * Write out the values for debugging purposes.
-     * <p>
-     * @return String
-     */
-    @Override
-    public String toString()
-    {
-        final StringBuilder str = new StringBuilder();
-        str.append( "TableState " );
-        str.append( "\n TableName = " + getTableName() );
-        str.append( "\n State = " + getState() );
-        return str.toString();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.Serializable;
+
+/**
+ * This is used by various elements of the JDBC disk cache to indicate the
+ * status of a table. The MySQL disk cache, for instance, marks the status as
+ * optimizing when a scheduled optimization is taking place. This allows the
+ * cache to balk rather than block during long running optimizations.
+ */
+public class TableState
+    implements Serializable
+{
+    /** Don't change. */
+    private static final long serialVersionUID = -6625081552084964885L;
+
+    /** Name of the table whose state this reflects. */
+    private String tableName;
+
+    /**
+     * The table is free. It can be accessed and no potentially table locking
+     * jobs are running.
+     */
+    public static final int FREE = 0;
+
+    /** A potentially table locking deletion is running */
+    public static final int DELETE_RUNNING = 1;
+
+    /** A table locking optimization is running. */
+    public static final int OPTIMIZATION_RUNNING = 2;
+
+    /** The current state. We might want to add an error state. */
+    private int state = FREE;
+
+    /**
+     * Construct a usable table state.
+     * <p>
+     * @param tableName
+     */
+    public TableState( final String tableName )
+    {
+        this.setTableName( tableName );
+    }
+
+    /**
+     * @param tableName
+     *            The tableName to set.
+     */
+    public void setTableName( final String tableName )
+    {
+        this.tableName = tableName;
+    }
+
+    /**
+     * @return Returns the tableName.
+     */
+    public String getTableName()
+    {
+        return tableName;
+    }
+
+    /**
+     * @param state
+     *            The state to set.
+     */
+    public void setState( final int state )
+    {
+        this.state = state;
+    }
+
+    /**
+     * @return Returns the state.
+     */
+    public int getState()
+    {
+        return state;
+    }
+
+    /**
+     * Write out the values for debugging purposes.
+     * <p>
+     * @return String
+     */
+    @Override
+    public String toString()
+    {
+        final StringBuilder str = new StringBuilder();
+        str.append( "TableState " );
+        str.append( "\n TableName = " + getTableName() );
+        str.append( "\n State = " + getState() );
+        return str.toString();
+    }
+}
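
TableState is a simple flag object. A minimal sketch of the balk-rather-than-block
pattern it supports, mirroring the checks in MySQLDiskCache later in this commit (the
table name is illustrative):

    import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;

    public class TableStateSketch
    {
        public static void main(final String[] args)
        {
            final TableState tableState = new TableState("JCS_STORE");

            tableState.setState(TableState.OPTIMIZATION_RUNNING);
            if (tableState.getState() == TableState.OPTIMIZATION_RUNNING)
            {
                // Balk: skip the operation instead of blocking on a locked table.
                System.out.println("optimization running, skipping access");
            }
            tableState.setState(TableState.FREE);
        }
    }
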
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/DataSourceFactory.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/DataSourceFactory.java
index fbb73ac7..eeb9cbf2 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/DataSourceFactory.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/DataSourceFactory.java
@@ -29,10 +29,6 @@ import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
 /**
  * A factory that returns a DataSource.
  * Borrowed from Apache DB Torque
- *
- * @author <a href="mailto:jmcnally@apache.org">John McNally</a>
- * @author <a href="mailto:fischer@seitenbau.de">Thomas Fischer</a>
- * @version $Id: DataSourceFactory.java 1336091 2012-05-09 11:09:40Z tfischer $
  */
 public interface DataSourceFactory
 {
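
The interface itself is small. As a sketch, a hypothetical implementation that hands back
a pre-built DataSource could look like this; the class name is invented, and the method
set follows the implementations shown below:

    import java.sql.SQLException;

    import javax.sql.DataSource;

    import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
    import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;

    /** Hypothetical factory that wraps an already-configured DataSource. */
    public class FixedDataSourceFactory implements DataSourceFactory
    {
        private final DataSource dataSource;

        private String name;

        public FixedDataSourceFactory(final DataSource dataSource)
        {
            this.dataSource = dataSource;
        }

        @Override
        public String getName()
        {
            return name;
        }

        @Override
        public DataSource getDataSource() throws SQLException
        {
            return dataSource;
        }

        @Override
        public void initialize(final JDBCDiskCacheAttributes config) throws SQLException
        {
            this.name = config.getConnectionPoolName();
        }

        @Override
        public void close() throws SQLException
        {
            // Nothing to release; the caller owns the wrapped DataSource.
        }
    }
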
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/JndiDataSourceFactory.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/JndiDataSourceFactory.java
index 975f68f4..91f58f12 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/JndiDataSourceFactory.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/JndiDataSourceFactory.java
@@ -1,173 +1,170 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.sql.SQLException;
-import java.util.Hashtable;
-import java.util.Map;
-
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import javax.sql.DataSource;
-
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-
-/**
- * A factory that looks up the DataSource from JNDI.  It is also able
- * to deploy the DataSource based on properties found in the
- * configuration.
- *
- * This factory tries to avoid excessive context lookups to improve speed.
- * The time between two lookups can be configured. The default is 0 (no cache).
- *
- * Borrowed and adapted from Apache DB Torque
- *
- * @author <a href="mailto:jmcnally@apache.org">John McNally</a>
- * @author <a href="mailto:thomas@vandahl.org">Thomas Vandahl</a>
- */
-public class JndiDataSourceFactory implements DataSourceFactory
-{
-    /** The log. */
-    private static final Log log = LogManager.getLog(JndiDataSourceFactory.class);
-
-    /** The name of the factory. */
-    private String name;
-
-    /** The path to get the resource from. */
-    private String path;
-
-    /** The context to get the resource from. */
-    private Context ctx;
-
-    /** A locally cached copy of the DataSource */
-    private DataSource ds;
-
-    /** Time of last actual lookup action */
-    private long lastLookup;
-
-    /** Time between two lookups */
-    private long ttl; // ms
-
-    /**
-     * @return the name of the factory.
-     */
-    @Override
-	public String getName()
-    {
-    	return name;
-    }
-
-    /**
-     * @see org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory#getDataSource()
-     */
-    @Override
-	public DataSource getDataSource() throws SQLException
-    {
-        final long time = System.currentTimeMillis();
-
-        if (ds == null || time - lastLookup > ttl)
-        {
-            try
-            {
-                synchronized (ctx)
-                {
-                    ds = (DataSource) ctx.lookup(path);
-                }
-                lastLookup = time;
-            }
-            catch (final NamingException e)
-            {
-                throw new SQLException(e);
-            }
-        }
-
-        return ds;
-    }
-
-    /**
-     * @see org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory#initialize(JDBCDiskCacheAttributes)
-     */
-    @Override
-	public void initialize(final JDBCDiskCacheAttributes config) throws SQLException
-    {
-    	this.name = config.getConnectionPoolName();
-        initJNDI(config);
-    }
-
-    /**
-     * Initializes JNDI.
-     *
-     * @param config where to read the settings from
-     * @throws SQLException if a property set fails
-     */
-    private void initJNDI(final JDBCDiskCacheAttributes config) throws SQLException
-    {
-        log.debug("Starting initJNDI");
-
-        try
-        {
-            this.path = config.getJndiPath();
-            log.debug("JNDI path: {0}", path);
-
-            this.ttl = config.getJndiTTL();
-            log.debug("Time between context lookups: {0}", ttl);
-
-    		final Hashtable<String, Object> env = new Hashtable<>();
-            ctx = new InitialContext(env);
-
-            if (log.isTraceEnabled())
-            {
-            	log.trace("Created new InitialContext");
-            	debugCtx(ctx);
-            }
-        }
-        catch (final NamingException e)
-        {
-            throw new SQLException(e);
-        }
-    }
-
-    /**
-     * Does nothing. We do not want to close a DataSource retrieved from JNDI,
-     * because other applications might use it as well.
-     */
-    @Override
-	public void close()
-    {
-        // do nothing
-    }
-
-    /**
-     * Writes the context environment to the trace log.
-     *
-     * @param ctx the context
-     * @throws NamingException
-     */
-    private void debugCtx(final Context ctx) throws NamingException
-    {
-        log.trace("InitialContext -------------------------------");
-        final Map<?, ?> env = ctx.getEnvironment();
-        log.trace("Environment properties: {0}", env.size());
-        env.forEach((key, value) -> log.trace("    {0}: {1}", key, value));
-        log.trace("----------------------------------------------");
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.SQLException;
+import java.util.Hashtable;
+import java.util.Map;
+
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import javax.sql.DataSource;
+
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+
+/**
+ * A factory that looks up the DataSource from JNDI.  It is also able
+ * to deploy the DataSource based on properties found in the
+ * configuration.
+ *
+ * This factory tries to avoid excessive context lookups to improve speed.
+ * The time between two lookups can be configured. The default is 0 (no cache).
+ *
+ * Borrowed and adapted from Apache DB Torque
+ */
+public class JndiDataSourceFactory implements DataSourceFactory
+{
+    /** The log. */
+    private static final Log log = LogManager.getLog(JndiDataSourceFactory.class);
+
+    /** The name of the factory. */
+    private String name;
+
+    /** The path to get the resource from. */
+    private String path;
+
+    /** The context to get the resource from. */
+    private Context ctx;
+
+    /** A locally cached copy of the DataSource */
+    private DataSource ds;
+
+    /** Time of last actual lookup action */
+    private long lastLookup;
+
+    /** Time between two lookups */
+    private long ttl; // ms
+
+    /**
+     * @return the name of the factory.
+     */
+    @Override
+	public String getName()
+    {
+    	return name;
+    }
+
+    /**
+     * @see org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory#getDataSource()
+     */
+    @Override
+	public DataSource getDataSource() throws SQLException
+    {
+        final long time = System.currentTimeMillis();
+
+        if (ds == null || time - lastLookup > ttl)
+        {
+            try
+            {
+                synchronized (ctx)
+                {
+                    ds = (DataSource) ctx.lookup(path);
+                }
+                lastLookup = time;
+            }
+            catch (final NamingException e)
+            {
+                throw new SQLException(e);
+            }
+        }
+
+        return ds;
+    }
+
+    /**
+     * @see org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory#initialize(JDBCDiskCacheAttributes)
+     */
+    @Override
+	public void initialize(final JDBCDiskCacheAttributes config) throws SQLException
+    {
+    	this.name = config.getConnectionPoolName();
+        initJNDI(config);
+    }
+
+    /**
+     * Initializes JNDI.
+     *
+     * @param config where to read the settings from
+     * @throws SQLException if a property set fails
+     */
+    private void initJNDI(final JDBCDiskCacheAttributes config) throws SQLException
+    {
+        log.debug("Starting initJNDI");
+
+        try
+        {
+            this.path = config.getJndiPath();
+            log.debug("JNDI path: {0}", path);
+
+            this.ttl = config.getJndiTTL();
+            log.debug("Time between context lookups: {0}", ttl);
+
+    		final Hashtable<String, Object> env = new Hashtable<>();
+            ctx = new InitialContext(env);
+
+            if (log.isTraceEnabled())
+            {
+            	log.trace("Created new InitialContext");
+            	debugCtx(ctx);
+            }
+        }
+        catch (final NamingException e)
+        {
+            throw new SQLException(e);
+        }
+    }
+
+    /**
+     * Does nothing. We do not want to close a DataSource retrieved from JNDI,
+     * because other applications might use it as well.
+     */
+    @Override
+	public void close()
+    {
+        // do nothing
+    }
+
+    /**
+     * Writes the context environment to the trace log.
+     *
+     * @param ctx the context
+     * @throws NamingException
+     */
+    private void debugCtx(final Context ctx) throws NamingException
+    {
+        log.trace("InitialContext -------------------------------");
+        final Map<?, ?> env = ctx.getEnvironment();
+        log.trace("Environment properties: {0}", env.size());
+        env.forEach((key, value) -> log.trace("    {0}: {1}", key, value));
+        log.trace("----------------------------------------------");
+    }
+}
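
A sketch of configuring this JNDI factory directly. The getters used in initJNDI imply
matching setters on JDBCDiskCacheAttributes (setJndiPath, setJndiTTL); those setter
names, the pool name, and the JNDI path are assumptions, and the lookup succeeds only
inside a container that actually binds the resource:

    import javax.sql.DataSource;

    import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
    import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.JndiDataSourceFactory;

    public class JndiLookupSketch
    {
        public static void main(final String[] args) throws Exception
        {
            final JDBCDiskCacheAttributes attrs = new JDBCDiskCacheAttributes();
            attrs.setConnectionPoolName("MyJndiPool");       // hypothetical pool name
            attrs.setJndiPath("java:comp/env/jdbc/CacheDS"); // hypothetical JNDI path
            attrs.setJndiTTL(5000);                          // re-look-up at most every 5 seconds

            final JndiDataSourceFactory factory = new JndiDataSourceFactory();
            factory.initialize(attrs);
            // The DataSource is cached between lookups for the configured TTL.
            final DataSource ds = factory.getDataSource();
        }
    }
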
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/SharedPoolDataSourceFactory.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/SharedPoolDataSourceFactory.java
index 43f3d3b7..4e817488 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/SharedPoolDataSourceFactory.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/dsfactory/SharedPoolDataSourceFactory.java
@@ -35,9 +35,6 @@ import org.apache.commons.jcs3.log.LogManager;
  * A factory that looks up the DataSource using the JDBC2 pool methods.
  *
  * Borrowed and adapted from Apache DB Torque
- *
- * @author <a href="mailto:jmcnally@apache.org">John McNally</a>
- * @author <a href="mailto:hps@intermeta.de">Henning P. Schmiedehausen</a>
  */
 public class SharedPoolDataSourceFactory implements DataSourceFactory
 {
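
For the pooled variant, a sketch along the same lines. The connection settings are
illustrative, and the setter names (setDriverClassName, setUrl, setUserName, setPassword)
are assumed from the JDBCDiskCacheAttributes property conventions:

    import javax.sql.DataSource;

    import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
    import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.SharedPoolDataSourceFactory;

    public class SharedPoolSketch
    {
        public static void main(final String[] args) throws Exception
        {
            final JDBCDiskCacheAttributes attrs = new JDBCDiskCacheAttributes();
            attrs.setConnectionPoolName("MyPool"); // hypothetical pool name
            attrs.setDriverClassName("org.hsqldb.jdbc.JDBCDriver");
            attrs.setUrl("jdbc:hsqldb:mem:cache");
            attrs.setUserName("sa");
            attrs.setPassword("");

            final SharedPoolDataSourceFactory factory = new SharedPoolDataSourceFactory();
            factory.initialize(attrs);
            final DataSource ds = factory.getDataSource();
        }
    }
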
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/hsql/HSQLDiskCacheFactory.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/hsql/HSQLDiskCacheFactory.java
index d75f6816..45f6b4b7 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/hsql/HSQLDiskCacheFactory.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/hsql/HSQLDiskCacheFactory.java
@@ -1,129 +1,127 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc.hsql;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-import javax.sql.DataSource;
-
-import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCache;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheFactory;
-import org.apache.commons.jcs3.engine.behavior.ICompositeCacheManager;
-import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
-import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-
-/**
- * This factory should create hsql disk caches.
- * <p>
- * @author Aaron Smuts
- */
-public class HSQLDiskCacheFactory
-    extends JDBCDiskCacheFactory
-{
-    /** The logger */
-    private static final Log log = LogManager.getLog( HSQLDiskCacheFactory.class );
-
-    /**
-     * This factory method should create an instance of the HSQL cache.
-     * <p>
-     * @param rawAttr specific cache configuration attributes
-     * @param compositeCacheManager the global cache manager
-     * @param cacheEventLogger a specific logger for cache events
-     * @param elementSerializer a serializer for cache elements
-     * @return JDBCDiskCache
-     * @throws SQLException if the creation of the cache instance fails
-     */
-    @Override
-    public <K, V> JDBCDiskCache<K, V> createCache( final AuxiliaryCacheAttributes rawAttr,
-			final ICompositeCacheManager compositeCacheManager,
-			final ICacheEventLogger cacheEventLogger,
-			final IElementSerializer elementSerializer )
-			throws SQLException
-    {
-        // TODO get this from the attributes.
-        System.setProperty( "hsqldb.cache_scale", "8" );
-
-        final JDBCDiskCache<K, V> cache = super.createCache(rawAttr, compositeCacheManager,
-                cacheEventLogger, elementSerializer);
-        setupDatabase( cache.getDataSource(), (JDBCDiskCacheAttributes) rawAttr );
-
-        return cache;
-    }
-
-    /**
-     * Creates the table if it doesn't exist
-     * <p>
-     * @param ds Data Source
-     * @param attributes Cache region configuration
-     * @throws SQLException
-     */
-    protected void setupDatabase( final DataSource ds, final JDBCDiskCacheAttributes attributes )
-        throws SQLException
-    {
-        try (Connection cConn = ds.getConnection())
-        {
-            setupTable( cConn, attributes.getTableName() );
-            log.info( "Finished setting up table [{0}]", attributes.getTableName());
-        }
-    }
-
-    /**
-     * Sets up the table for the cache.
-     * <p>
-     * @param cConn the connection
-     * @param tableName the table name
-     */
-    protected synchronized void setupTable( final Connection cConn, final String tableName ) throws SQLException
-    {
-        final DatabaseMetaData dmd = cConn.getMetaData();
-        final ResultSet result = dmd.getTables(null, null, tableName, null);
-
-        if (!result.next())
-        {
-            // TODO make the cached nature of the table configurable
-            final StringBuilder createSql = new StringBuilder();
-            createSql.append( "CREATE CACHED TABLE ").append( tableName );
-            createSql.append( "( " );
-            createSql.append( "CACHE_KEY             VARCHAR(250)          NOT NULL, " );
-            createSql.append( "REGION                VARCHAR(250)          NOT NULL, " );
-            createSql.append( "ELEMENT               BINARY, " );
-            createSql.append( "CREATE_TIME           TIMESTAMP, " );
-            createSql.append( "UPDATE_TIME_SECONDS   BIGINT, " );
-            createSql.append( "MAX_LIFE_SECONDS      BIGINT, " );
-            createSql.append( "SYSTEM_EXPIRE_TIME_SECONDS      BIGINT, " );
-            createSql.append( "IS_ETERNAL            CHAR(1), " );
-            createSql.append( "PRIMARY KEY (CACHE_KEY, REGION) " );
-            createSql.append( ");" );
-
-            try (Statement sStatement = cConn.createStatement())
-            {
-                sStatement.execute( createSql.toString() );
-            }
-        }
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc.hsql;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCache;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheFactory;
+import org.apache.commons.jcs3.engine.behavior.ICompositeCacheManager;
+import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
+import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+
+/**
+ * This factory should create hsql disk caches.
+ */
+public class HSQLDiskCacheFactory
+    extends JDBCDiskCacheFactory
+{
+    /** The logger */
+    private static final Log log = LogManager.getLog( HSQLDiskCacheFactory.class );
+
+    /**
+     * This factory method should create an instance of the HSQL cache.
+     * <p>
+     * @param rawAttr specific cache configuration attributes
+     * @param compositeCacheManager the global cache manager
+     * @param cacheEventLogger a specific logger for cache events
+     * @param elementSerializer a serializer for cache elements
+     * @return JDBCDiskCache
+     * @throws SQLException if the creation of the cache instance fails
+     */
+    @Override
+    public <K, V> JDBCDiskCache<K, V> createCache( final AuxiliaryCacheAttributes rawAttr,
+			final ICompositeCacheManager compositeCacheManager,
+			final ICacheEventLogger cacheEventLogger,
+			final IElementSerializer elementSerializer )
+			throws SQLException
+    {
+        // TODO get this from the attributes.
+        System.setProperty( "hsqldb.cache_scale", "8" );
+
+        final JDBCDiskCache<K, V> cache = super.createCache(rawAttr, compositeCacheManager,
+                cacheEventLogger, elementSerializer);
+        setupDatabase( cache.getDataSource(), (JDBCDiskCacheAttributes) rawAttr );
+
+        return cache;
+    }
+
+    /**
+     * Creates the table if it doesn't exist
+     * <p>
+     * @param ds Data Source
+     * @param attributes Cache region configuration
+     * @throws SQLException
+     */
+    protected void setupDatabase( final DataSource ds, final JDBCDiskCacheAttributes attributes )
+        throws SQLException
+    {
+        try (Connection cConn = ds.getConnection())
+        {
+            setupTable( cConn, attributes.getTableName() );
+            log.info( "Finished setting up table [{0}]", attributes.getTableName());
+        }
+    }
+
+    /**
+     * Sets up the table for the cache.
+     * <p>
+     * @param cConn the connection
+     * @param tableName the table name
+     */
+    protected synchronized void setupTable( final Connection cConn, final String tableName ) throws SQLException
+    {
+        final DatabaseMetaData dmd = cConn.getMetaData();
+        final ResultSet result = dmd.getTables(null, null, tableName, null);
+
+        if (!result.next())
+        {
+            // TODO make the cached nature of the table configurable
+            final StringBuilder createSql = new StringBuilder();
+            createSql.append( "CREATE CACHED TABLE ").append( tableName );
+            createSql.append( "( " );
+            createSql.append( "CACHE_KEY             VARCHAR(250)          NOT NULL, " );
+            createSql.append( "REGION                VARCHAR(250)          NOT NULL, " );
+            createSql.append( "ELEMENT               BINARY, " );
+            createSql.append( "CREATE_TIME           TIMESTAMP, " );
+            createSql.append( "UPDATE_TIME_SECONDS   BIGINT, " );
+            createSql.append( "MAX_LIFE_SECONDS      BIGINT, " );
+            createSql.append( "SYSTEM_EXPIRE_TIME_SECONDS      BIGINT, " );
+            createSql.append( "IS_ETERNAL            CHAR(1), " );
+            createSql.append( "PRIMARY KEY (CACHE_KEY, REGION) " );
+            createSql.append( ");" );
+
+            try (Statement sStatement = cConn.createStatement())
+            {
+                sStatement.execute( createSql.toString() );
+            }
+        }
+    }
+}
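
The existence check in setupTable reduces to one DatabaseMetaData call. A self-contained
sketch against an in-memory HSQLDB, assuming the hsqldb driver is on the classpath and
using an illustrative table name:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class TableCheckSketch
    {
        public static void main(final String[] args) throws Exception
        {
            try (Connection conn = DriverManager.getConnection("jdbc:hsqldb:mem:cache", "sa", ""))
            {
                final DatabaseMetaData dmd = conn.getMetaData();
                // Same lookup setupTable uses to decide whether to issue CREATE CACHED TABLE.
                try (ResultSet result = dmd.getTables(null, null, "JCS_STORE", null))
                {
                    System.out.println("table exists: " + result.next());
                }
            }
        }
    }
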
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCache.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCache.java
index d1ad058d..443efa8a 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCache.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCache.java
@@ -1,155 +1,154 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.sql.SQLException;
-import java.util.Map;
-
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCache;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
-import org.apache.commons.jcs3.engine.behavior.ICacheElement;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-
-/**
- * The MySQLDiskCache extends the core JDBCDiskCache.
- * <p>
- * Although the generic JDBC Disk Cache can be used for MySQL, the MySQL JDBC Disk Cache has
- * additional features, such as table optimization, that are particular to MySQL.
- * <p>
- * @author Aaron Smuts
- */
-public class MySQLDiskCache<K, V>
-	extends JDBCDiskCache<K, V>
-{
-    /** local logger */
-    private static final Log log = LogManager.getLog( MySQLDiskCache.class );
-
-    /** config attributes */
-    private final MySQLDiskCacheAttributes mySQLDiskCacheAttributes;
-
-    /**
-     * Delegates to the super and makes use of the MySQL specific parameters used for scheduled
-     * optimization.
-     * <p>
-     * @param attributes the configuration object for this cache
-     * @param dsFactory the DataSourceFactory for this cache
-     * @param tableState an object to track table operations
-     * @throws SQLException if the pool access could not be set up
-     */
-    public MySQLDiskCache( final MySQLDiskCacheAttributes attributes, final DataSourceFactory dsFactory,
-    		final TableState tableState) throws SQLException
-    {
-        super( attributes, dsFactory, tableState);
-
-        mySQLDiskCacheAttributes = attributes;
-
-        log.debug( "MySQLDiskCacheAttributes = {0}", attributes );
-    }
-
-    /**
-     * This delegates to the generic JDBC disk cache. If we are currently optimizing, then this
-     * method will balk and return null.
-     * <p>
-     * @param key Key to locate value for.
-     * @return An object matching key, or null.
-     */
-    @Override
-    protected ICacheElement<K, V> processGet( final K key )
-    {
-        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
-            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
-        {
-            return null;
-        }
-        return super.processGet( key );
-    }
-
-    /**
-     * This delegates to the generic JDBC disk cache. If we are currently optimizing, then this
-     * method will balk and return null.
-     * <p>
-     * @param pattern used for the like query.
-     * @return A map of keys to matching elements, or null.
-     */
-    @Override
-    protected Map<K, ICacheElement<K, V>> processGetMatching( final String pattern )
-    {
-        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
-            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
-        {
-            return null;
-        }
-        return super.processGetMatching( pattern );
-    }
-
-    /**
-     * @param pattern the cache key pattern
-     * @return String to use in the like query.
-     */
-    @Override
-    public String constructLikeParameterFromPattern( final String pattern )
-    {
-        String likePattern = pattern.replaceAll( "\\.\\+", "%" );
-        likePattern = likePattern.replaceAll( "\\.", "_" );
-
-        log.debug( "pattern = [{0}]", likePattern );
-
-        return likePattern;
-    }
-
-    /**
-     * This delegates to the generic JDBC disk cache. If we are currently optimizing, then this
-     * method will balk and do nothing.
-     * <p>
-     * @param element
-     */
-    @Override
-    protected void processUpdate( final ICacheElement<K, V> element )
-    {
-        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
-            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
-        {
-            return;
-        }
-        super.processUpdate( element );
-    }
-
-    /**
-     * Removes the expired: (now - create time) &gt; max life seconds * 1000.
-     * <p>
-     * If we are currently optimizing, then this method will balk and do nothing.
-     * <p>
-     * TODO consider blocking and trying again.
-     * <p>
-     * @return the number deleted
-     */
-    @Override
-    protected int deleteExpired()
-    {
-        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
-            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
-        {
-            return -1;
-        }
-        return super.deleteExpired();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.SQLException;
+import java.util.Map;
+
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCache;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
+import org.apache.commons.jcs3.engine.behavior.ICacheElement;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+
+/**
+ * The MySQLDiskCache extends the core JDBCDiskCache.
+ * <p>
+ * Although the generic JDBC Disk Cache can be used for MySQL, the MySQL JDBC Disk Cache has
+ * additional features, such as table optimization, that are particular to MySQL.
+ * </p>
+ */
+public class MySQLDiskCache<K, V>
+    extends JDBCDiskCache<K, V>
+{
+    /** local logger */
+    private static final Log log = LogManager.getLog( MySQLDiskCache.class );
+
+    /** config attributes */
+    private final MySQLDiskCacheAttributes mySQLDiskCacheAttributes;
+
+    /**
+     * Delegates to the superclass and makes use of the MySQL-specific parameters used for scheduled
+     * optimization.
+     * <p>
+     * @param attributes the configuration object for this cache
+     * @param dsFactory the DataSourceFactory for this cache
+     * @param tableState an object to track table operations
+     * @throws SQLException if the pool access could not be set up
+     */
+    public MySQLDiskCache( final MySQLDiskCacheAttributes attributes, final DataSourceFactory dsFactory,
+            final TableState tableState) throws SQLException
+    {
+        super( attributes, dsFactory, tableState);
+
+        mySQLDiskCacheAttributes = attributes;
+
+        log.debug( "MySQLDiskCacheAttributes = {0}", attributes );
+    }
+
+    /**
+     * This delegates to the generic JDBC disk cache. If we are currently optimizing, then this
+     * method will balk and return null.
+     * <p>
+     * @param key Key to locate value for.
+     * @return An object matching key, or null.
+     */
+    @Override
+    protected ICacheElement<K, V> processGet( final K key )
+    {
+        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
+            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
+        {
+            return null;
+        }
+        return super.processGet( key );
+    }
+
+    /**
+     * This delegates to the generic JDBC disk cache. If we are currently optimizing, then this
+     * method will balk and return null.
+     * <p>
+     * @param pattern used for like query.
+     * @return A map of cache elements matching the pattern, or null.
+     */
+    @Override
+    protected Map<K, ICacheElement<K, V>> processGetMatching( final String pattern )
+    {
+        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
+            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
+        {
+            return null;
+        }
+        return super.processGetMatching( pattern );
+    }
+
+    /**
+     * Converts a JCS key pattern into a MySQL like pattern: ".+" becomes "%" and "." becomes "_".
+     * <p>
+     * @param pattern the JCS key pattern to convert
+     * @return String to use in the like query.
+     */
+    @Override
+    public String constructLikeParameterFromPattern( final String pattern )
+    {
+        String likePattern = pattern.replaceAll( "\\.\\+", "%" );
+        likePattern = likePattern.replaceAll( "\\.", "_" );
+
+        log.debug( "likePattern = [{0}]", likePattern );
+
+        return likePattern;
+    }
+
+    /**
+     * This delegates to the generic JDBC disk cache. If we are currently optimizing, then this
+     * method will balk and do nothing.
+     * <p>
+     * @param element the element to update
+     */
+    @Override
+    protected void processUpdate( final ICacheElement<K, V> element )
+    {
+        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
+            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
+        {
+            return;
+        }
+        super.processUpdate( element );
+    }
+
+    /**
+     * Removes the expired: (now - create time) &gt; max life seconds * 1000.
+     * <p>
+     * If we are currently optimizing, then this method will balk and do nothing.
+     * <p>
+     * TODO consider blocking and trying again.
+     * <p>
+     * @return the number deleted
+     */
+    @Override
+    protected int deleteExpired()
+    {
+        if (this.getTableState().getState() == TableState.OPTIMIZATION_RUNNING &&
+            this.mySQLDiskCacheAttributes.isBalkDuringOptimization())
+        {
+            return -1;
+        }
+        return super.deleteExpired();
+    }
+}
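
The guard repeated in processGet, processGetMatching, processUpdate, and deleteExpired above is the balking pattern the attributes class links to: while the table is being optimized, reads report a miss and writes are dropped instead of blocking on the table. A minimal standalone sketch of the same idea, assuming nothing from JCS (all names below are illustrative):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Minimal balking-pattern sketch: operations return immediately with a
    // null/no-op result while maintenance runs, rather than waiting on a lock.
    public class BalkingStore<K, V>
    {
        private final AtomicBoolean optimizing = new AtomicBoolean(false);
        private final Map<K, V> map = new ConcurrentHashMap<>();

        public V get(final K key)
        {
            if (optimizing.get())
            {
                return null; // balk: the caller treats this as a cache miss
            }
            return map.get(key);
        }

        public void put(final K key, final V value)
        {
            if (optimizing.get())
            {
                return; // balk: skip the write during optimization
            }
            map.put(key, value);
        }

        public void optimize(final Runnable maintenance)
        {
            if (!optimizing.compareAndSet(false, true))
            {
                return; // an optimization is already running
            }
            try
            {
                maintenance.run();
            }
            finally
            {
                optimizing.set(false);
            }
        }
    }

The file's constructLikeParameterFromPattern is the query-side counterpart of processGetMatching: a key pattern such as foo.+ becomes the SQL like parameter foo%.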
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheAttributes.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheAttributes.java
index ca61e148..b95852ce 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheAttributes.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheAttributes.java
@@ -1,107 +1,105 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
-
-/**
- * This has additional attributes that are particular to the MySQL disk cache.
- * <p>
- * @author Aaron Smuts
- */
-public class MySQLDiskCacheAttributes
-    extends JDBCDiskCacheAttributes
-{
-    /** Don't change. */
-    private static final long serialVersionUID = -6535808344813320061L;
-
-    /**
-     * For now this is a simple comma delimited list of HH:MM:SS times to optimize
-     * the table. If none is supplied, then no optimizations will be performed.
-     * <p>
-     * In the future we can add a chron like scheduling system. This is to meet
-     * a pressing current need.
-     * <p>
-     * 03:01,15:00 will cause the optimizer to run at 3 am and at 3 pm.
-     */
-    private String optimizationSchedule;
-
-    /**
-     * If true, we will balk, that is return null during optimization rather than block.
-     */
-    public static final boolean DEFAULT_BALK_DURING_OPTIMIZATION = true;
-
-    /**
-     * If true, we will balk, that is return null during optimization rather than block.
-     * <p>
-     * <a href="http://en.wikipedia.org/wiki/Balking_pattern">Balking</a>
-     */
-    private boolean balkDuringOptimization = DEFAULT_BALK_DURING_OPTIMIZATION;
-
-    /**
-     * @param optimizationSchedule The optimizationSchedule to set.
-     */
-    public void setOptimizationSchedule( final String optimizationSchedule )
-    {
-        this.optimizationSchedule = optimizationSchedule;
-    }
-
-    /**
-     * @return Returns the optimizationSchedule.
-     */
-    public String getOptimizationSchedule()
-    {
-        return optimizationSchedule;
-    }
-
-    /**
-     * @param balkDuringOptimization The balkDuringOptimization to set.
-     */
-    public void setBalkDuringOptimization( final boolean balkDuringOptimization )
-    {
-        this.balkDuringOptimization = balkDuringOptimization;
-    }
-
-    /**
-     * Should we return null while optimizing the table.
-     * <p>
-     * @return Returns the balkDuringOptimization.
-     */
-    public boolean isBalkDuringOptimization()
-    {
-        return balkDuringOptimization;
-    }
-
-    /**
-     * For debugging.
-     * <p>
-     * @return debug string
-     */
-    @Override
-    public String toString()
-    {
-        final StringBuilder buf = new StringBuilder();
-        buf.append( "\nMySQLDiskCacheAttributes" );
-        buf.append( "\n OptimizationSchedule [" + getOptimizationSchedule() + "]" );
-        buf.append( "\n BalkDuringOptimization [" + isBalkDuringOptimization() + "]" );
-        buf.append( super.toString() );
-        return buf.toString();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheAttributes;
+
+/**
+ * This has additional attributes that are particular to the MySQL disk cache.
+ */
+public class MySQLDiskCacheAttributes
+    extends JDBCDiskCacheAttributes
+{
+    /** Don't change. */
+    private static final long serialVersionUID = -6535808344813320061L;
+
+    /**
+     * For now this is a simple comma delimited list of HH:MM:SS times to optimize
+     * the table. If none is supplied, then no optimizations will be performed.
+     * <p>
+     * In the future we can add a cron-like scheduling system. This is to meet
+     * a pressing current need.
+     * <p>
+     * 03:01:00,15:00:00 will cause the optimizer to run at 3:01 am and at 3 pm.
+     */
+    private String optimizationSchedule;
+
+    /**
+     * If true, we will balk, that is, return null during optimization rather than block.
+     */
+    public static final boolean DEFAULT_BALK_DURING_OPTIMIZATION = true;
+
+    /**
+     * If true, we will balk, that is, return null during optimization rather than block.
+     * <p>
+     * <a href="http://en.wikipedia.org/wiki/Balking_pattern">Balking</a>
+     */
+    private boolean balkDuringOptimization = DEFAULT_BALK_DURING_OPTIMIZATION;
+
+    /**
+     * @param optimizationSchedule The optimizationSchedule to set.
+     */
+    public void setOptimizationSchedule( final String optimizationSchedule )
+    {
+        this.optimizationSchedule = optimizationSchedule;
+    }
+
+    /**
+     * @return Returns the optimizationSchedule.
+     */
+    public String getOptimizationSchedule()
+    {
+        return optimizationSchedule;
+    }
+
+    /**
+     * @param balkDuringOptimization The balkDuringOptimization to set.
+     */
+    public void setBalkDuringOptimization( final boolean balkDuringOptimization )
+    {
+        this.balkDuringOptimization = balkDuringOptimization;
+    }
+
+    /**
+     * Should we return null while optimizing the table?
+     * <p>
+     * @return Returns the balkDuringOptimization.
+     */
+    public boolean isBalkDuringOptimization()
+    {
+        return balkDuringOptimization;
+    }
+
+    /**
+     * For debugging.
+     * <p>
+     * @return debug string
+     */
+    @Override
+    public String toString()
+    {
+        final StringBuilder buf = new StringBuilder();
+        buf.append( "\nMySQLDiskCacheAttributes" );
+        buf.append( "\n OptimizationSchedule [" + getOptimizationSchedule() + "]" );
+        buf.append( "\n BalkDuringOptimization [" + isBalkDuringOptimization() + "]" );
+        buf.append( super.toString() );
+        return buf.toString();
+    }
+}
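
A short sketch of how these two attributes come together. The auxiliary name MYSQL and the cache.ccf property lines in the comment are assumptions for illustration; in a real deployment JCS populates the attributes object from configuration rather than from code like this:

    import org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql.MySQLDiskCacheAttributes;

    public class MySQLAttributesExample
    {
        public static void main(final String[] args)
        {
            // Equivalent cache.ccf lines would look roughly like:
            //   jcs.auxiliary.MYSQL.attributes.optimizationSchedule=03:01:00,15:00:00
            //   jcs.auxiliary.MYSQL.attributes.balkDuringOptimization=true
            final MySQLDiskCacheAttributes attributes = new MySQLDiskCacheAttributes();
            attributes.setTableName("JCS_STORE_DEFAULT");
            attributes.setOptimizationSchedule("03:01:00,15:00:00"); // 3:01 am and 3 pm daily
            attributes.setBalkDuringOptimization(true); // return null rather than block
            System.out.println(attributes);
        }
    }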
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheFactory.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheFactory.java
index 2bbcde39..bc0a5366 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheFactory.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLDiskCacheFactory.java
@@ -1,159 +1,155 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.sql.SQLException;
-import java.text.ParseException;
-import java.util.Date;
-import java.util.concurrent.TimeUnit;
-
-import javax.sql.DataSource;
-
-import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheFactory;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql.util.ScheduleParser;
-import org.apache.commons.jcs3.engine.behavior.ICompositeCacheManager;
-import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
-import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-
-/**
- * This factory should create mysql disk caches.
- * <p>
- * @author Aaron Smuts
- */
-public class MySQLDiskCacheFactory
-    extends JDBCDiskCacheFactory
-{
-    /** The logger */
-    private static final Log log = LogManager.getLog( MySQLDiskCacheFactory.class );
-
-    /**
-     * This factory method should create an instance of the mysqlcache.
-     * <p>
-     * @param rawAttr specific cache configuration attributes
-     * @param compositeCacheManager the global cache manager
-     * @param cacheEventLogger a specific logger for cache events
-     * @param elementSerializer a serializer for cache elements
-     * @return MySQLDiskCache the cache instance
-     * @throws SQLException if the cache instance could not be created
-     */
-    @Override
-    public <K, V> MySQLDiskCache<K, V> createCache( final AuxiliaryCacheAttributes rawAttr,
-            final ICompositeCacheManager compositeCacheManager,
-            final ICacheEventLogger cacheEventLogger, final IElementSerializer elementSerializer )
-            throws SQLException
-    {
-        final MySQLDiskCacheAttributes cattr = (MySQLDiskCacheAttributes) rawAttr;
-        final TableState tableState = getTableState( cattr.getTableName() );
-        final DataSourceFactory dsFactory = getDataSourceFactory(cattr, compositeCacheManager.getConfigurationProperties());
-
-        final MySQLDiskCache<K, V> cache = new MySQLDiskCache<>( cattr, dsFactory, tableState);
-        cache.setCacheEventLogger( cacheEventLogger );
-        cache.setElementSerializer( elementSerializer );
-
-        // create a shrinker if we need it.
-        createShrinkerWhenNeeded( cattr, cache );
-        scheduleOptimizations( cattr, tableState, cache.getDataSource() );
-
-        return cache;
-
-    }
-
-    /**
-     * For each time in the optimization schedule, this calls schedule Optimization.
-     * <p>
-     * @param attributes configuration properties.
-     * @param tableState for noting optimization in progress, etc.
-     * @param ds the DataSource
-     */
-    protected void scheduleOptimizations( final MySQLDiskCacheAttributes attributes, final TableState tableState, final DataSource ds  )
-    {
-        if ( attributes != null )
-        {
-            if ( attributes.getOptimizationSchedule() != null )
-            {
-                log.info( "Will try to configure optimization for table [{0}] on schedule [{1}]",
-                        attributes::getTableName, attributes::getOptimizationSchedule);
-
-                final MySQLTableOptimizer optimizer = new MySQLTableOptimizer( attributes, tableState, ds );
-
-                // loop through the dates.
-                try
-                {
-                    // can´t be null, otherwise ScheduleParser.createDatesForSchedule will throw ParseException
-                    final Date[] dates = ScheduleParser.createDatesForSchedule( attributes.getOptimizationSchedule() );
-                    for (final Date date : dates) {
-                        this.scheduleOptimization( date, optimizer );
-                    }
-                }
-                catch ( final ParseException e )
-                {
-                    log.warn( "Problem creating optimization schedule for table [{0}]",
-                            attributes.getTableName(), e );
-                }
-            }
-            else
-            {
-                log.info( "Optimization is not configured for table [{0}]",
-                        attributes.getTableName());
-            }
-        }
-    }
-
-    /**
-     * This takes in a single time and schedules the optimizer to be called at that time every day.
-     * <p>
-     * @param startTime -- HH:MM:SS format
-     * @param optimizer
-     */
-    protected void scheduleOptimization( final Date startTime, final MySQLTableOptimizer optimizer )
-    {
-        log.info( "startTime [{0}] for optimizer {1}", startTime, optimizer );
-
-        final Date now = new Date();
-        final long initialDelay = startTime.getTime() - now.getTime();
-
-        // have the daemon execute the optimization
-        getScheduledExecutorService().scheduleAtFixedRate(() -> optimizeTable(optimizer),
-                initialDelay, 86400L, TimeUnit.SECONDS );
-    }
-
-    /**
-     * This calls the optimizers' optimize table method. This is used by the timer.
-     * <p>
-     * @author Aaron Smuts
-     */
-    private void optimizeTable(final MySQLTableOptimizer optimizer)
-    {
-        if ( optimizer != null )
-        {
-            final boolean success = optimizer.optimizeTable();
-            log.info( "Optimization success status [{0}]", success );
-        }
-        else
-        {
-            log.warn( "OptimizerRunner: The optimizer is null. Could not optimize table." );
-        }
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.SQLException;
+import java.text.ParseException;
+import java.util.Date;
+import java.util.concurrent.TimeUnit;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.JDBCDiskCacheFactory;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.dsfactory.DataSourceFactory;
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql.util.ScheduleParser;
+import org.apache.commons.jcs3.engine.behavior.ICompositeCacheManager;
+import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
+import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+
+/**
+ * This factory creates MySQL disk caches.
+ */
+public class MySQLDiskCacheFactory
+    extends JDBCDiskCacheFactory
+{
+    /** The logger */
+    private static final Log log = LogManager.getLog( MySQLDiskCacheFactory.class );
+
+    /**
+     * This factory method creates an instance of the MySQLDiskCache.
+     * <p>
+     * @param rawAttr specific cache configuration attributes
+     * @param compositeCacheManager the global cache manager
+     * @param cacheEventLogger a specific logger for cache events
+     * @param elementSerializer a serializer for cache elements
+     * @return MySQLDiskCache the cache instance
+     * @throws SQLException if the cache instance could not be created
+     */
+    @Override
+    public <K, V> MySQLDiskCache<K, V> createCache( final AuxiliaryCacheAttributes rawAttr,
+            final ICompositeCacheManager compositeCacheManager,
+            final ICacheEventLogger cacheEventLogger, final IElementSerializer elementSerializer )
+            throws SQLException
+    {
+        final MySQLDiskCacheAttributes cattr = (MySQLDiskCacheAttributes) rawAttr;
+        final TableState tableState = getTableState( cattr.getTableName() );
+        final DataSourceFactory dsFactory = getDataSourceFactory(cattr, compositeCacheManager.getConfigurationProperties());
+
+        final MySQLDiskCache<K, V> cache = new MySQLDiskCache<>( cattr, dsFactory, tableState);
+        cache.setCacheEventLogger( cacheEventLogger );
+        cache.setElementSerializer( elementSerializer );
+
+        // create a shrinker if we need it.
+        createShrinkerWhenNeeded( cattr, cache );
+        scheduleOptimizations( cattr, tableState, cache.getDataSource() );
+
+        return cache;
+    }
+
+    /**
+     * For each time in the optimization schedule, this calls scheduleOptimization.
+     * <p>
+     * @param attributes configuration properties.
+     * @param tableState for noting optimization in progress, etc.
+     * @param ds the DataSource
+     */
+    protected void scheduleOptimizations( final MySQLDiskCacheAttributes attributes, final TableState tableState, final DataSource ds  )
+    {
+        if ( attributes != null )
+        {
+            if ( attributes.getOptimizationSchedule() != null )
+            {
+                log.info( "Will try to configure optimization for table [{0}] on schedule [{1}]",
+                        attributes::getTableName, attributes::getOptimizationSchedule);
+
+                final MySQLTableOptimizer optimizer = new MySQLTableOptimizer( attributes, tableState, ds );
+
+                // loop through the dates.
+                try
+                {
+                    // can't be null, otherwise ScheduleParser.createDatesForSchedule will throw ParseException
+                    final Date[] dates = ScheduleParser.createDatesForSchedule( attributes.getOptimizationSchedule() );
+                    for (final Date date : dates) {
+                        this.scheduleOptimization( date, optimizer );
+                    }
+                }
+                catch ( final ParseException e )
+                {
+                    log.warn( "Problem creating optimization schedule for table [{0}]",
+                            attributes.getTableName(), e );
+                }
+            }
+            else
+            {
+                log.info( "Optimization is not configured for table [{0}]",
+                        attributes.getTableName());
+            }
+        }
+    }
+
+    /**
+     * This takes in a single time and schedules the optimizer to be called at that time every day.
+     * <p>
+     * @param startTime the HH:MM:SS time of day at which to run the optimizer
+     * @param optimizer the optimizer to schedule
+     */
+    protected void scheduleOptimization( final Date startTime, final MySQLTableOptimizer optimizer )
+    {
+        log.info( "startTime [{0}] for optimizer {1}", startTime, optimizer );
+
+        final Date now = new Date();
+        final long initialDelay = startTime.getTime() - now.getTime();
+
+        // have the daemon execute the optimization once a day.
+        // initialDelay is in milliseconds, so the period and time unit must match.
+        getScheduledExecutorService().scheduleAtFixedRate(() -> optimizeTable(optimizer),
+                initialDelay, TimeUnit.DAYS.toMillis(1), TimeUnit.MILLISECONDS );
+    }
+
+    /**
+     * This calls the optimizer's optimizeTable method. This is used by the timer.
+     */
+    private void optimizeTable(final MySQLTableOptimizer optimizer)
+    {
+        if ( optimizer != null )
+        {
+            final boolean success = optimizer.optimizeTable();
+            log.info( "Optimization success status [{0}]", success );
+        }
+        else
+        {
+            log.warn( "OptimizerRunner: The optimizer is null. Could not optimize table." );
+        }
+    }
+}
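
scheduleOptimization computes its initial delay from Date.getTime() values, which are milliseconds, so the delay, the period, and the TimeUnit passed to scheduleAtFixedRate must all agree. A self-contained sketch of the same computation with plain java.util.concurrent, under the assumption that 03:01:00 is the configured time (class and message names are illustrative):

    import java.util.Calendar;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class DailyScheduleExample
    {
        public static void main(final String[] args)
        {
            final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

            // Next 03:01:00, computed the way ScheduleParser.getDateForSchedule does:
            // today's date with the given time of day, plus one day if already past.
            final Calendar next = Calendar.getInstance();
            next.set(Calendar.HOUR_OF_DAY, 3);
            next.set(Calendar.MINUTE, 1);
            next.set(Calendar.SECOND, 0);
            next.set(Calendar.MILLISECOND, 0);
            if (next.before(Calendar.getInstance()))
            {
                next.add(Calendar.DAY_OF_MONTH, 1);
            }

            // getTimeInMillis() differences are milliseconds, so the delay and
            // the one-day period are both expressed in MILLISECONDS here.
            final long initialDelay = next.getTimeInMillis() - System.currentTimeMillis();
            scheduler.scheduleAtFixedRate(
                    () -> System.out.println("optimize table"), // stand-in for the optimizer call
                    initialDelay, TimeUnit.DAYS.toMillis(1), TimeUnit.MILLISECONDS);
        }
    }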
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLTableOptimizer.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLTableOptimizer.java
index fa76409f..21585dee 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLTableOptimizer.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/MySQLTableOptimizer.java
@@ -1,287 +1,286 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-import javax.sql.DataSource;
-
-import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;
-import org.apache.commons.jcs3.log.Log;
-import org.apache.commons.jcs3.log.LogManager;
-import org.apache.commons.jcs3.utils.timing.ElapsedTimer;
-
-/**
- * The MySQL Table Optimizer can optimize MySQL tables. It knows how to optimize for MySQL databases
- * in particular and how to repair the table if it is corrupted in the process.
- * <p>
- * We will probably be able to abstract out a generic optimizer interface from this class in the
- * future.
- * <p>
- * @author Aaron Smuts
- */
-public class MySQLTableOptimizer
-{
-    /** The logger */
-    private static final Log log = LogManager.getLog( MySQLTableOptimizer.class );
-
-    /** The data source */
-    private final DataSource dataSource;
-
-    /** The name of the table. */
-    private String tableName;
-
-    /** optimizing, etc. */
-    private final TableState tableState;
-
-    /**
-     * This constructs an optimizer with the disk can properties.
-     * <p>
-     * @param attributes
-     * @param tableState We mark the table status as optimizing when this is happening.
-     * @param dataSource access to the database
-     */
-    public MySQLTableOptimizer( final MySQLDiskCacheAttributes attributes, final TableState tableState, final DataSource dataSource )
-    {
-        setTableName( attributes.getTableName() );
-
-        this.tableState = tableState;
-        this.dataSource = dataSource;
-    }
-
-    /**
-     * A scheduler will call this method. When it is called the table state is marked as optimizing.
-     * TODO we need to verify that no deletions are running before we call optimize. We should wait
-     * if a deletion is in progress.
-     * <p>
-     * This restores when there is an optimization error. The error output looks like this:
-     *
-     * <pre>
-     *           mysql&gt; optimize table JCS_STORE_FLIGHT_OPTION_ITINERARY;
-     *               +---------------------------------------------+----------+----------+---------------------+
-     *               | Table                                       | Op       | Msg_type | Msg_text            |
-     *               +---------------------------------------------+----------+----------+---------------------+
-     *               | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | optimize | error    | 2 when fixing table |
-     *               | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | optimize | status   | Operation failed    |
-     *               +---------------------------------------------+----------+----------+---------------------+
-     *               2 rows in set (51.78 sec)
-     * </pre>
-     *
-     * A successful repair response looks like this:
-     *
-     * <pre>
-     *        mysql&gt; REPAIR TABLE JCS_STORE_FLIGHT_OPTION_ITINERARY;
-     *            +---------------------------------------------+--------+----------+----------------------------------------------+
-     *            | Table                                       | Op     | Msg_type | Msg_text                                     |
-     *            +---------------------------------------------+--------+----------+----------------------------------------------+
-     *            | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | repair | error    | 2 when fixing table                          |
-     *            | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | repair | warning  | Number of rows changed from 131276 to 260461 |
-     *            | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | repair | status   | OK                                           |
-     *            +---------------------------------------------+--------+----------+----------------------------------------------+
-     *            3 rows in set (3 min 5.94 sec)
-     * </pre>
-     *
-     * A successful optimization looks like this:
-     *
-     * <pre>
-     *       mysql&gt; optimize table JCS_STORE_DEFAULT;
-     *           +-----------------------------+----------+----------+----------+
-     *           | Table                       | Op       | Msg_type | Msg_text |
-     *           +-----------------------------+----------+----------+----------+
-     *           | jcs_cache.JCS_STORE_DEFAULT | optimize | status   | OK       |
-     *           +-----------------------------+----------+----------+----------+
-     *           1 row in set (1.10 sec)
-     * </pre>
-     * @return true if it worked
-     */
-    public boolean optimizeTable()
-    {
-        final ElapsedTimer timer = new ElapsedTimer();
-        boolean success = false;
-
-        if ( tableState.getState() == TableState.OPTIMIZATION_RUNNING )
-        {
-            log.warn( "Skipping optimization. Optimize was called, but the "
-                    + "table state indicates that an optimization is currently running." );
-            return false;
-        }
-
-        try
-        {
-            tableState.setState( TableState.OPTIMIZATION_RUNNING );
-            log.info( "Optimizing table [{0}]", this.getTableName());
-
-            try (Connection con = dataSource.getConnection())
-            {
-                // TEST
-                try (Statement sStatement = con.createStatement())
-                {
-                    try (ResultSet rs = sStatement.executeQuery( "optimize table " + this.getTableName() ))
-                    {
-                        // first row is error, then status
-                        // if there is only one row in the result set, everything
-                        // should be fine.
-                        // This may be mysql version specific.
-                        if ( rs.next() )
-                        {
-                            final String status = rs.getString( "Msg_type" );
-                            final String message = rs.getString( "Msg_text" );
-
-                            log.info( "Message Type: {0}", status );
-                            log.info( "Message: {0}", message );
-
-                            if ( "error".equals( status ) )
-                            {
-                                log.warn( "Optimization was in error. Will attempt "
-                                        + "to repair the table. Message: {0}", message);
-
-                                // try to repair the table.
-                                success = repairTable( sStatement );
-                            }
-                            else
-                            {
-                                success = true;
-                            }
-                        }
-                    }
-
-                    // log the table status
-                    final String statusString = getTableStatus( sStatement );
-                    log.info( "Table status after optimizing table [{0}]: {1}",
-                            this.getTableName(), statusString );
-                }
-                catch ( final SQLException e )
-                {
-                    log.error( "Problem optimizing table [{0}]",
-                            this.getTableName(), e );
-                    return false;
-                }
-            }
-            catch ( final SQLException e )
-            {
-                log.error( "Problem getting connection.", e );
-            }
-        }
-        finally
-        {
-            tableState.setState( TableState.FREE );
-
-            log.info( "Optimization of table [{0}] took {1} ms.",
-                    this::getTableName, timer::getElapsedTime);
-        }
-
-        return success;
-    }
-
-    /**
-     * This calls show table status and returns the result as a String.
-     * <p>
-     * @param sStatement
-     * @return String
-     * @throws SQLException
-     */
-    protected String getTableStatus( final Statement sStatement )
-        throws SQLException
-    {
-        final StringBuilder statusString = new StringBuilder();
-        try (ResultSet statusResultSet = sStatement.executeQuery( "show table status" ))
-        {
-            final int numColumns = statusResultSet.getMetaData().getColumnCount();
-            while ( statusResultSet.next() )
-            {
-                statusString.append( "\n" );
-                for ( int i = 1; i <= numColumns; i++ )
-                {
-                    statusString.append(statusResultSet.getMetaData().getColumnLabel(i))
-                        .append(" [")
-                        .append(statusResultSet.getString(i))
-                        .append("]  |  ");
-                }
-            }
-        }
-        return statusString.toString();
-    }
-
-    /**
-     * This is called if the optimization is in error.
-     * <p>
-     * It looks for "OK" in response. If it find "OK" as a message in any result set row, it returns
-     * true. Otherwise we assume that the repair failed.
-     * <p>
-     * @param sStatement
-     * @return true if successful
-     * @throws SQLException
-     */
-    protected boolean repairTable( final Statement sStatement )
-        throws SQLException
-    {
-        boolean success = false;
-
-        // if( message != null && message.indexOf( ) )
-        final StringBuilder repairString = new StringBuilder();
-        try (ResultSet repairResult = sStatement.executeQuery( "repair table " + this.getTableName()))
-        {
-            final int numColumns = repairResult.getMetaData().getColumnCount();
-            while ( repairResult.next() )
-            {
-                for ( int i = 1; i <= numColumns; i++ )
-                {
-                    repairString.append(repairResult.getMetaData().getColumnLabel(i))
-                        .append(" [")
-                        .append(repairResult.getString(i))
-                        .append("]  |  ");
-                }
-
-                final String message = repairResult.getString( "Msg_text" );
-                if ( "OK".equals( message ) )
-                {
-                    success = true;
-                }
-            }
-        }
-
-        log.info("{0}", repairString);
-
-        if ( !success )
-        {
-            log.warn( "Failed to repair the table. {0}", repairString );
-        }
-        return success;
-    }
-
-    /**
-     * @param tableName The tableName to set.
-     */
-    public void setTableName( final String tableName )
-    {
-        this.tableName = tableName;
-    }
-
-    /**
-     * @return Returns the tableName.
-     */
-    public String getTableName()
-    {
-        return tableName;
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;
+import org.apache.commons.jcs3.log.Log;
+import org.apache.commons.jcs3.log.LogManager;
+import org.apache.commons.jcs3.utils.timing.ElapsedTimer;
+
+/**
+ * The MySQL Table Optimizer can optimize MySQL tables. It knows how to optimize for MySQL databases
+ * in particular and how to repair the table if it is corrupted in the process.
+ * <p>
+ * We will probably be able to abstract out a generic optimizer interface from this class in the
+ * future.
+ * </p>
+ */
+public class MySQLTableOptimizer
+{
+    /** The logger */
+    private static final Log log = LogManager.getLog( MySQLTableOptimizer.class );
+
+    /** The data source */
+    private final DataSource dataSource;
+
+    /** The name of the table. */
+    private String tableName;
+
+    /** The table state: free, optimization running, etc. */
+    private final TableState tableState;
+
+    /**
+     * This constructs an optimizer with the disk cache properties.
+     * <p>
+     * @param attributes the disk cache configuration
+     * @param tableState We mark the table status as optimizing when this is happening.
+     * @param dataSource access to the database
+     */
+    public MySQLTableOptimizer( final MySQLDiskCacheAttributes attributes, final TableState tableState, final DataSource dataSource )
+    {
+        setTableName( attributes.getTableName() );
+
+        this.tableState = tableState;
+        this.dataSource = dataSource;
+    }
+
+    /**
+     * A scheduler will call this method. When it is called the table state is marked as optimizing.
+     * TODO we need to verify that no deletions are running before we call optimize. We should wait
+     * if a deletion is in progress.
+     * <p>
+     * The table is repaired when there is an optimization error. The error output looks like this:
+     *
+     * <pre>
+     *           mysql&gt; optimize table JCS_STORE_FLIGHT_OPTION_ITINERARY;
+     *               +---------------------------------------------+----------+----------+---------------------+
+     *               | Table                                       | Op       | Msg_type | Msg_text            |
+     *               +---------------------------------------------+----------+----------+---------------------+
+     *               | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | optimize | error    | 2 when fixing table |
+     *               | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | optimize | status   | Operation failed    |
+     *               +---------------------------------------------+----------+----------+---------------------+
+     *               2 rows in set (51.78 sec)
+     * </pre>
+     *
+     * A successful repair response looks like this:
+     *
+     * <pre>
+     *        mysql&gt; REPAIR TABLE JCS_STORE_FLIGHT_OPTION_ITINERARY;
+     *            +---------------------------------------------+--------+----------+----------------------------------------------+
+     *            | Table                                       | Op     | Msg_type | Msg_text                                     |
+     *            +---------------------------------------------+--------+----------+----------------------------------------------+
+     *            | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | repair | error    | 2 when fixing table                          |
+     *            | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | repair | warning  | Number of rows changed from 131276 to 260461 |
+     *            | jcs_cache.JCS_STORE_FLIGHT_OPTION_ITINERARY | repair | status   | OK                                           |
+     *            +---------------------------------------------+--------+----------+----------------------------------------------+
+     *            3 rows in set (3 min 5.94 sec)
+     * </pre>
+     *
+     * A successful optimization looks like this:
+     *
+     * <pre>
+     *       mysql&gt; optimize table JCS_STORE_DEFAULT;
+     *           +-----------------------------+----------+----------+----------+
+     *           | Table                       | Op       | Msg_type | Msg_text |
+     *           +-----------------------------+----------+----------+----------+
+     *           | jcs_cache.JCS_STORE_DEFAULT | optimize | status   | OK       |
+     *           +-----------------------------+----------+----------+----------+
+     *           1 row in set (1.10 sec)
+     * </pre>
+     * @return true if it worked
+     */
+    public boolean optimizeTable()
+    {
+        final ElapsedTimer timer = new ElapsedTimer();
+        boolean success = false;
+
+        if ( tableState.getState() == TableState.OPTIMIZATION_RUNNING )
+        {
+            log.warn( "Skipping optimization. Optimize was called, but the "
+                    + "table state indicates that an optimization is currently running." );
+            return false;
+        }
+
+        try
+        {
+            tableState.setState( TableState.OPTIMIZATION_RUNNING );
+            log.info( "Optimizing table [{0}]", this.getTableName());
+
+            try (Connection con = dataSource.getConnection())
+            {
+                try (Statement sStatement = con.createStatement())
+                {
+                    try (ResultSet rs = sStatement.executeQuery( "optimize table " + this.getTableName() ))
+                    {
+                        // first row is error, then status
+                        // if there is only one row in the result set, everything
+                        // should be fine.
+                        // This may be mysql version specific.
+                        if ( rs.next() )
+                        {
+                            final String status = rs.getString( "Msg_type" );
+                            final String message = rs.getString( "Msg_text" );
+
+                            log.info( "Message Type: {0}", status );
+                            log.info( "Message: {0}", message );
+
+                            if ( "error".equals( status ) )
+                            {
+                                log.warn( "Optimization was in error. Will attempt "
+                                        + "to repair the table. Message: {0}", message);
+
+                                // try to repair the table.
+                                success = repairTable( sStatement );
+                            }
+                            else
+                            {
+                                success = true;
+                            }
+                        }
+                    }
+
+                    // log the table status
+                    final String statusString = getTableStatus( sStatement );
+                    log.info( "Table status after optimizing table [{0}]: {1}",
+                            this.getTableName(), statusString );
+                }
+                catch ( final SQLException e )
+                {
+                    log.error( "Problem optimizing table [{0}]",
+                            this.getTableName(), e );
+                    return false;
+                }
+            }
+            catch ( final SQLException e )
+            {
+                log.error( "Problem getting connection.", e );
+            }
+        }
+        finally
+        {
+            tableState.setState( TableState.FREE );
+
+            log.info( "Optimization of table [{0}] took {1} ms.",
+                    this::getTableName, timer::getElapsedTime);
+        }
+
+        return success;
+    }
+
+    /**
+     * This calls show table status and returns the result as a String.
+     * <p>
+     * @param sStatement the statement to use
+     * @return the table status as a String
+     * @throws SQLException if a database access error occurs
+     */
+    protected String getTableStatus( final Statement sStatement )
+        throws SQLException
+    {
+        final StringBuilder statusString = new StringBuilder();
+        try (ResultSet statusResultSet = sStatement.executeQuery( "show table status" ))
+        {
+            final int numColumns = statusResultSet.getMetaData().getColumnCount();
+            while ( statusResultSet.next() )
+            {
+                statusString.append( "\n" );
+                for ( int i = 1; i <= numColumns; i++ )
+                {
+                    statusString.append(statusResultSet.getMetaData().getColumnLabel(i))
+                        .append(" [")
+                        .append(statusResultSet.getString(i))
+                        .append("]  |  ");
+                }
+            }
+        }
+        return statusString.toString();
+    }
+
+    /**
+     * This is called if the optimization is in error.
+     * <p>
+     * It looks for "OK" in the response. If it finds "OK" as a message in any result set row, it returns
+     * true. Otherwise we assume that the repair failed.
+     * <p>
+     * @param sStatement the statement to use
+     * @return true if successful
+     * @throws SQLException if a database access error occurs
+     */
+    protected boolean repairTable( final Statement sStatement )
+        throws SQLException
+    {
+        boolean success = false;
+
+        final StringBuilder repairString = new StringBuilder();
+        try (ResultSet repairResult = sStatement.executeQuery( "repair table " + this.getTableName()))
+        {
+            final int numColumns = repairResult.getMetaData().getColumnCount();
+            while ( repairResult.next() )
+            {
+                for ( int i = 1; i <= numColumns; i++ )
+                {
+                    repairString.append(repairResult.getMetaData().getColumnLabel(i))
+                        .append(" [")
+                        .append(repairResult.getString(i))
+                        .append("]  |  ");
+                }
+
+                final String message = repairResult.getString( "Msg_text" );
+                if ( "OK".equals( message ) )
+                {
+                    success = true;
+                }
+            }
+        }
+
+        log.info("{0}", repairString);
+
+        if ( !success )
+        {
+            log.warn( "Failed to repair the table. {0}", repairString );
+        }
+        return success;
+    }
+
+    /**
+     * @param tableName The tableName to set.
+     */
+    public void setTableName( final String tableName )
+    {
+        this.tableName = tableName;
+    }
+
+    /**
+     * @return Returns the tableName.
+     */
+    public String getTableName()
+    {
+        return tableName;
+    }
+}
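
A hedged usage sketch for the optimizer: wiring it to a plain JDBC DataSource and running it once by hand, mirroring how the factory builds it from getTableState and the data source. The DBCP2 BasicDataSource, the connection URL, and the credentials are assumptions for illustration; any DataSource pointing at the cache schema will do, and the shared TableState is what lets a live cache balk while this runs:

    import org.apache.commons.dbcp2.BasicDataSource; // assumed to be on the classpath
    import org.apache.commons.jcs3.auxiliary.disk.jdbc.TableState;
    import org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql.MySQLDiskCacheAttributes;
    import org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql.MySQLTableOptimizer;

    public class OptimizerExample
    {
        public static void main(final String[] args)
        {
            final BasicDataSource dataSource = new BasicDataSource();
            dataSource.setUrl("jdbc:mysql://localhost:3306/jcs_cache"); // illustrative URL
            dataSource.setUsername("jcs");
            dataSource.setPassword("secret");

            final MySQLDiskCacheAttributes attributes = new MySQLDiskCacheAttributes();
            attributes.setTableName("JCS_STORE_DEFAULT");

            // Shared with the cache so concurrent gets/puts can balk while
            // the optimization is marked as running.
            final TableState tableState = new TableState(attributes.getTableName());

            final MySQLTableOptimizer optimizer =
                    new MySQLTableOptimizer(attributes, tableState, dataSource);
            System.out.println("optimized: " + optimizer.optimizeTable());
        }
    }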
diff --git a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/util/ScheduleParser.java b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/util/ScheduleParser.java
index c6215594..33d444a1 100644
--- a/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/util/ScheduleParser.java
+++ b/commons-jcs-core/src/main/java/org/apache/commons/jcs3/auxiliary/disk/jdbc/mysql/util/ScheduleParser.java
@@ -1,93 +1,91 @@
-package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql.util;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
-import java.util.Date;
-
-/**
- * Parses the very simple schedule format.
- * <p>
- * @author Aaron Smuts
- */
-public class ScheduleParser
-{
-    /**
-     * For each date time that is separated by a comma in the
-     * OptimizationSchedule, create a date and add it to an array of dates.
-     * <p>
-     * @param schedule
-     * @return Date[]
-     * @throws ParseException
-     */
-    public static Date[] createDatesForSchedule( final String schedule )
-        throws ParseException
-    {
-        if (schedule == null || schedule.isEmpty())
-        {
-            throw new ParseException( "Cannot create schedules for a null or empty String.", 0 );
-        }
-
-        final String timeStrings[] = schedule.split("\\s*,\\s*");
-        final Date[] dates = new Date[timeStrings.length];
-        int cnt = 0;
-        for (String time : timeStrings)
-        {
-            dates[cnt++] = getDateForSchedule(time);
-        }
-        return dates;
-    }
-
-    /**
-     * For a single string it creates a date that is the next time this hh:mm:ss
-     * combo will be seen.
-     * <p>
-     * @param startTime
-     * @return Date
-     * @throws ParseException
-     */
-    public static Date getDateForSchedule( final String startTime )
-        throws ParseException
-    {
-        if ( startTime == null )
-        {
-            throw new ParseException( "Cannot create date for a null String.", 0 );
-        }
-
-        final SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss");
-        final Date date = sdf.parse(startTime);
-        final Calendar cal = Calendar.getInstance();
-        // This will result in a date of 1/1/1970
-        cal.setTime(date);
-
-        final Calendar now = Calendar.getInstance();
-        cal.set(now.get(Calendar.YEAR), now.get(Calendar.MONTH), now.get(Calendar.DAY_OF_MONTH));
-
-        // if the date is less than now, add a day.
-        if ( cal.before( now ) )
-        {
-            cal.add( Calendar.DAY_OF_MONTH, 1 );
-        }
-
-        return cal.getTime();
-    }
-}
+package org.apache.commons.jcs3.auxiliary.disk.jdbc.mysql.util;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Calendar;
+import java.util.Date;
+
+/**
+ * Parses the very simple schedule format.
+ */
+public class ScheduleParser
+{
+    /**
+     * For each date time that is separated by a comma in the
+     * OptimizationSchedule, create a date and add it to an array of dates.
+     * <p>
+     * @param schedule the comma-delimited list of HH:MM:SS times
+     * @return Date[] with one entry per scheduled time
+     * @throws ParseException if the schedule is null, empty, or malformed
+     */
+    public static Date[] createDatesForSchedule( final String schedule )
+        throws ParseException
+    {
+        if (schedule == null || schedule.isEmpty())
+        {
+            throw new ParseException( "Cannot create schedules for a null or empty String.", 0 );
+        }
+
+        final String[] timeStrings = schedule.split("\\s*,\\s*");
+        final Date[] dates = new Date[timeStrings.length];
+        int cnt = 0;
+        for (String time : timeStrings)
+        {
+            dates[cnt++] = getDateForSchedule(time);
+        }
+        return dates;
+    }
+
+    /**
+     * For a single string, this creates a date that is the next time this HH:MM:SS
+     * combination will occur.
+     * <p>
+     * @param startTime the HH:MM:SS time of day
+     * @return Date the next occurrence of that time
+     * @throws ParseException if startTime is null or cannot be parsed
+     */
+    public static Date getDateForSchedule( final String startTime )
+        throws ParseException
+    {
+        if ( startTime == null )
+        {
+            throw new ParseException( "Cannot create date for a null String.", 0 );
+        }
+
+        final SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss");
+        final Date date = sdf.parse(startTime);
+        final Calendar cal = Calendar.getInstance();
+        // This will result in a date of 1/1/1970
+        cal.setTime(date);
+
+        final Calendar now = Calendar.getInstance();
+        cal.set(now.get(Calendar.YEAR), now.get(Calendar.MONTH), now.get(Calendar.DAY_OF_MONTH));
+
+        // if the date is less than now, add a day.
+        if ( cal.before( now ) )
+        {
+            cal.add( Calendar.DAY_OF_MONTH, 1 );
... 49456 lines suppressed ...