You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@jackrabbit.apache.org by ck...@apache.org on 2012/11/27 13:39:16 UTC
svn commit: r1414155 - in
/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core:
config/RepositoryConfigurationParser.java data/MultiDataStore.java
data/MultiDataStoreAware.java
Author: ckoell
Date: Tue Nov 27 12:39:14 2012
New Revision: 1414155
URL: http://svn.apache.org/viewvc?rev=1414155&view=rev
Log:
JCR-3389 Implement a MultiDataStore
Modified:
jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java
jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java
jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java
Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java?rev=1414155&r1=1414154&r2=1414155&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java Tue Nov 27 12:39:14 2012
@@ -16,6 +16,16 @@
*/
package org.apache.jackrabbit.core.config;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+
+import javax.jcr.RepositoryException;
+import javax.xml.parsers.DocumentBuilderFactory;
+
import org.apache.commons.io.FileUtils;
import org.apache.jackrabbit.core.cluster.ClusterNode;
import org.apache.jackrabbit.core.data.DataStore;
@@ -40,26 +50,13 @@ import org.apache.jackrabbit.core.util.R
import org.apache.jackrabbit.core.util.RepositoryLockMechanismFactory;
import org.apache.jackrabbit.core.util.db.ConnectionFactory;
import org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver;
-import org.w3c.dom.Attr;
-import org.w3c.dom.DOMException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
-import org.w3c.dom.TypeInfo;
-import org.w3c.dom.UserDataHandler;
import org.xml.sax.InputSource;
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-import java.util.List;
-import java.util.ArrayList;
-
-import javax.jcr.RepositoryException;
-
/**
* Configuration parser. This class is used to parse the repository and
* workspace configuration files.
@@ -1030,49 +1027,53 @@ public class RepositoryConfigurationPars
Node child = children.item(i);
if (child.getNodeType() == Node.ELEMENT_NODE
&& DATA_STORE_ELEMENT.equals(child.getNodeName())) {
- BeanConfig bc =
- parseBeanConfig(parent, DATA_STORE_ELEMENT);
+ BeanConfig bc = parseBeanConfig(parent, DATA_STORE_ELEMENT);
bc.setValidate(false);
DataStore store = bc.newInstance(DataStore.class);
if (store instanceof MultiDataStore) {
- DataStore primary = null;
- DataStore archive = null;
- NodeList subParamNodes = child.getChildNodes();
+ DataStore primary = null;
+ DataStore archive = null;
+ NodeList subParamNodes = child.getChildNodes();
for (int x = 0; x < subParamNodes.getLength(); x++) {
Node paramNode = subParamNodes.item(x);
if (paramNode.getNodeType() == Node.ELEMENT_NODE
- && (PRIMARY_DATASTORE_ATTRIBUTE.equals(paramNode.getAttributes().getNamedItem("name").getNodeValue())
- || ARCHIVE_DATASTORE_ATTRIBUTE.equals(paramNode.getAttributes().getNamedItem("name").getNodeValue()))) {
- final ElementImpl datastoreElement = new ElementImpl(DATA_STORE_ELEMENT, Node.ELEMENT_NODE, paramNode.getAttributes(), paramNode.getChildNodes());
- ElementImpl parent = new ElementImpl("parent", Node.ELEMENT_NODE, null, new NodeList() {
-
- @Override
- public Node item(int index) {
- return datastoreElement;
- }
-
- @Override
- public int getLength() {
- return 1;
- }
- });
- DataStore subDataStore = getDataStoreFactory(parent, directory).getDataStore();
- if (!MultiDataStoreAware.class.isAssignableFrom(subDataStore.getClass())) {
- throw new ConfigurationException("Only MultiDataStoreAware datastore's can be used within a MultiDataStore.");
- }
- String type = getAttribute((Element) paramNode, NAME_ATTRIBUTE);
- if (PRIMARY_DATASTORE_ATTRIBUTE.equals(type)) {
- primary = subDataStore;
- } else if (ARCHIVE_DATASTORE_ATTRIBUTE.equals(type)) {
- archive = subDataStore;
- }
+ && (PRIMARY_DATASTORE_ATTRIBUTE.equals(paramNode.getAttributes().getNamedItem("name").getNodeValue())
+ || ARCHIVE_DATASTORE_ATTRIBUTE.equals(paramNode.getAttributes().getNamedItem("name").getNodeValue()))) {
+ try {
+ Document document = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
+ Element newParent = document.createElement("parent");
+ document.appendChild(newParent);
+ Element datastoreElement = document.createElement(DATA_STORE_ELEMENT);
+ newParent.appendChild(datastoreElement);
+ NodeList childNodes = paramNode.getChildNodes();
+ for (int y = 0; childNodes.getLength() > y; y++) {
+ datastoreElement.appendChild(document.importNode(childNodes.item(y), true));
+ }
+ NamedNodeMap attributes = paramNode.getAttributes();
+ for (int z = 0; attributes.getLength() > z; z++) {
+ Node item = attributes.item(z);
+ datastoreElement.setAttribute(CLASS_ATTRIBUTE, item.getNodeValue());
+ }
+ DataStore subDataStore = getDataStoreFactory(newParent, directory).getDataStore();
+ if (!MultiDataStoreAware.class.isAssignableFrom(subDataStore.getClass())) {
+ throw new ConfigurationException("Only MultiDataStoreAware datastore's can be used within a MultiDataStore.");
+ }
+ String type = getAttribute((Element) paramNode, NAME_ATTRIBUTE);
+ if (PRIMARY_DATASTORE_ATTRIBUTE.equals(type)) {
+ primary = subDataStore;
+ } else if (ARCHIVE_DATASTORE_ATTRIBUTE.equals(type)) {
+ archive = subDataStore;
+ }
+ } catch (Exception e) {
+ throw new ConfigurationException("Failed to parse the MultiDataStore element.", e);
+ }
}
}
if (primary == null || archive == null) {
- throw new ConfigurationException("A MultiDataStore must have configured a primary and archive datastore");
+ throw new ConfigurationException("A MultiDataStore must have configured a primary and archive datastore");
}
- ((MultiDataStore) store).setPrimaryDataStore(primary);
- ((MultiDataStore) store).setArchiveDataStore(archive);
+ ((MultiDataStore) store).setPrimaryDataStore(primary);
+ ((MultiDataStore) store).setArchiveDataStore(archive);
}
store.init(directory);
return store;
@@ -1179,305 +1180,4 @@ public class RepositoryConfigurationPars
public void setConfigVisitor(BeanConfigVisitor configVisitor) {
this.configVisitor = configVisitor;
}
-
- private class ElementImpl implements org.w3c.dom.Element {
-
- private String nodeName;
- private short nodeType;
- private NodeList childNodes;
- private NamedNodeMap params;
-
- public ElementImpl(String nodeName, short nodeType, NamedNodeMap params, NodeList nodeList) {
- this.nodeName = nodeName;
- this.nodeType = nodeType;
- this.childNodes = nodeList;
- this.params = params;
- }
-
- @Override
- public Node appendChild(Node newChild) throws DOMException {
- return null;
- }
-
- @Override
- public Node cloneNode(boolean deep) {
- return null;
- }
-
- @Override
- public short compareDocumentPosition(Node other) throws DOMException {
- return 0;
- }
-
- @Override
- public NamedNodeMap getAttributes() {
- return null;
- }
-
- @Override
- public String getBaseURI() {
- return null;
- }
-
- @Override
- public NodeList getChildNodes() {
- return childNodes;
- }
-
- @Override
- public Object getFeature(String feature, String version) {
- return null;
- }
-
- @Override
- public Node getFirstChild() {
- return null;
- }
-
- @Override
- public Node getLastChild() {
- return null;
- }
-
- @Override
- public String getLocalName() {
- return null;
- }
-
- @Override
- public String getNamespaceURI() {
- return null;
- }
-
- @Override
- public Node getNextSibling() {
- return null;
- }
-
- @Override
- public String getNodeName() {
- return nodeName;
- }
-
- @Override
- public short getNodeType() {
- return nodeType;
- }
-
- @Override
- public String getNodeValue() throws DOMException {
- return null;
- }
-
- @Override
- public Document getOwnerDocument() {
- return null;
- }
-
- @Override
- public Node getParentNode() {
- return null;
- }
-
- @Override
- public String getPrefix() {
- return null;
- }
-
- @Override
- public Node getPreviousSibling() {
- return null;
- }
-
- @Override
- public String getTextContent() throws DOMException {
- return null;
- }
-
- @Override
- public Object getUserData(String key) {
- return null;
- }
-
- @Override
- public boolean hasAttributes() {
- return false;
- }
-
- @Override
- public boolean hasChildNodes() {
- return false;
- }
-
- @Override
- public Node insertBefore(Node newChild, Node refChild)
- throws DOMException {
- return null;
- }
-
- @Override
- public boolean isDefaultNamespace(String namespaceURI) {
- return false;
- }
-
- @Override
- public boolean isEqualNode(Node arg) {
- return false;
- }
-
- @Override
- public boolean isSameNode(Node other) {
- return false;
- }
-
- @Override
- public boolean isSupported(String feature, String version) {
- return false;
- }
-
- @Override
- public String lookupNamespaceURI(String prefix) {
- return null;
- }
-
- @Override
- public String lookupPrefix(String namespaceURI) {
- return null;
- }
-
- @Override
- public void normalize() {
- }
-
- @Override
- public Node removeChild(Node oldChild) throws DOMException {
- return null;
- }
-
- @Override
- public Node replaceChild(Node newChild, Node oldChild)
- throws DOMException {
- return null;
- }
-
- @Override
- public void setNodeValue(String nodeValue) throws DOMException {
- }
-
- @Override
- public void setPrefix(String prefix) throws DOMException {
- }
-
- @Override
- public void setTextContent(String textContent) throws DOMException {
- }
-
- @Override
- public Object setUserData(String key, Object data,
- UserDataHandler handler) {
- return null;
- }
-
- @Override
- public String getAttribute(String name) {
- return null;
- }
-
- @Override
- public String getAttributeNS(String namespaceURI, String localName)
- throws DOMException {
- return null;
- }
-
- @Override
- public Attr getAttributeNode(String name) {
- return (Attr) params.getNamedItem(VALUE_ATTRIBUTE);
- }
-
- @Override
- public Attr getAttributeNodeNS(String namespaceURI, String localName)
- throws DOMException {
- return null;
- }
-
- @Override
- public NodeList getElementsByTagName(String name) {
- return null;
- }
-
- @Override
- public NodeList getElementsByTagNameNS(String namespaceURI,
- String localName) throws DOMException {
- return null;
- }
-
- @Override
- public TypeInfo getSchemaTypeInfo() {
- return null;
- }
-
- @Override
- public String getTagName() {
- return null;
- }
-
- @Override
- public boolean hasAttribute(String name) {
- return false;
- }
-
- @Override
- public boolean hasAttributeNS(String namespaceURI, String localName)
- throws DOMException {
- return false;
- }
-
- @Override
- public void removeAttribute(String name) throws DOMException {
- }
-
- @Override
- public void removeAttributeNS(String namespaceURI, String localName)
- throws DOMException {
- }
-
- @Override
- public Attr removeAttributeNode(Attr oldAttr) throws DOMException {
- return null;
- }
-
- @Override
- public void setAttribute(String name, String value) throws DOMException {
- }
-
- @Override
- public void setAttributeNS(String namespaceURI, String qualifiedName,
- String value) throws DOMException {
- }
-
- @Override
- public Attr setAttributeNode(Attr newAttr) throws DOMException {
- return null;
- }
-
- @Override
- public Attr setAttributeNodeNS(Attr newAttr) throws DOMException {
- return null;
- }
-
- @Override
- public void setIdAttribute(String name, boolean isId)
- throws DOMException {
- }
-
- @Override
- public void setIdAttributeNS(String namespaceURI, String localName,
- boolean isId) throws DOMException {
- }
-
- @Override
- public void setIdAttributeNode(Attr idAttr, boolean isId)
- throws DOMException {
- }
- }
}
Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java?rev=1414155&r1=1414154&r2=1414155&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java Tue Nov 27 12:39:14 2012
@@ -47,7 +47,8 @@ import org.slf4j.LoggerFactory;
* other one like a archive DataStore on a slower storage system. All Files will
* be added to the primary DataStore. On read operations first the primary
* dataStore will be used and if no Record is found the archive DataStore will
- * be used. The GarabageCollector will only remove files from the archive DataStore.
+ * be used. The GarbageCollector will only remove files from the archive
+ * DataStore.
* <p>
* The internal MoveDataTask will be started automatically and could be
* configured with the following properties.
@@ -75,577 +76,605 @@ import org.slf4j.LoggerFactory;
* <li><code>maxAge</code>: defines how many days the content will reside in the
* primary data store. DataRecords that have been added before this time span
* will be moved to the archive data store. (default = <code>60</code>)</li>
- * <li><code>moveDataTaskSleep</code>: specifies the sleep time of the moveDataTaskThread
- * in seconds. (default = 60 * 60 * 24 * 7, which equals 7 days)</li>
- * <li><code>moveDataTaskNextRunHourOfDay</code>: specifies the hour at which the
- * moveDataTaskThread initiates its first run (default = <code>1</code> which means
- * 01:00 at night)</li>
+ * <li><code>moveDataTaskSleep</code>: specifies the sleep time of the
+ * moveDataTaskThread in seconds. (default = 60 * 60 * 24 * 7, which equals 7
+ * days)</li>
+ * <li><code>moveDataTaskNextRunHourOfDay</code>: specifies the hour at which
+ * the moveDataTaskThread initiates its first run (default = <code>1</code>
+ * which means 01:00 at night)</li>
* <li><code>sleepBetweenRecords</code>: specifies the delay in milliseconds
* between scanning data records (default = <code>100</code>)</li>
- * <li><code>delayedDelete</code>: its possible to delay the delete operation
- * on the primary data store. The DataIdentifiers will be written to a temporary file.
- * The file will be processed after a defined sleep (see <code>delayedDeleteSleep</code>)
- * It's useful if you like to create a snapshot of the primary data store backend
- * in the meantime before the data will be deleted. (default = <code>false</code>)</li>
- * <li><code>delayedDeleteSleep</code>: specifies the sleep time of the delayedDeleteTaskThread
- * in seconds. (default = 60 * 60 * 24, which equals 1 day). This means the delayed delete
- * from the primary data store will be processed after one day.</li>
+ * <li><code>delayedDelete</code>: it's possible to delay the delete operation on
+ * the primary data store. The DataIdentifiers will be written to a temporary
+ * file. The file will be processed after a defined sleep (see
+ * <code>delayedDeleteSleep</code>) It's useful if you like to create a snapshot
+ * of the primary data store backend in the meantime before the data will be
+ * deleted. (default = <code>false</code>)</li>
+ * <li><code>delayedDeleteSleep</code>: specifies the sleep time of the
+ * delayedDeleteTaskThread in seconds. (default = 60 * 60 * 24, which equals 1
+ * day). This means the delayed delete from the primary data store will be
+ * processed after one day.</li>
* </ul>
*/
public class MultiDataStore implements DataStore {
- /**
- * Logger instance
- */
- private static Logger log = LoggerFactory.getLogger(MultiDataStore.class);
-
- private DataStore primaryDataStore;
- private DataStore archiveDataStore;
-
- /**
- * Max Age in days.
- */
- private int maxAge = 60;
-
- /**
- * ReentrantLock that is used while the MoveDataTask is running.
- */
- private ReentrantLock moveDataTaskLock = new ReentrantLock();
- private boolean moveDataTaskRunning = false;
- private Thread moveDataTaskThread;
-
- /**
- * The sleep time in seconds of the MoveDataTask, 7 day default.
- */
- private int moveDataTaskSleep = 60 * 60 * 24 * 7;
-
- /**
- * Indicates when the next run of the move task is scheduled. The first run is
- * scheduled by default at 01:00 hours.
- */
- private Calendar moveDataTaskNextRun = Calendar.getInstance();
-
- /**
- * Its possible to delay the delete operation on the primary data store
- * while move task is running. The delete will be executed after defined
- * delayDeleteSleep.
- */
- private boolean delayedDelete = false;
-
- /**
- * The sleep time in seconds to delay remove operation
- * on the primary data store, 1 day default.
- */
- private long delayedDeleteSleep = 60 * 60 * 24;
-
+ /**
+ * Logger instance
+ */
+ private static Logger log = LoggerFactory.getLogger(MultiDataStore.class);
+
+ private DataStore primaryDataStore;
+ private DataStore archiveDataStore;
+
+ /**
+ * Max Age in days.
+ */
+ private int maxAge = 60;
+
+ /**
+ * ReentrantLock that is used while the MoveDataTask is running.
+ */
+ private ReentrantLock moveDataTaskLock = new ReentrantLock();
+ private boolean moveDataTaskRunning = false;
+ private Thread moveDataTaskThread;
+
+ /**
+ * The sleep time in seconds of the MoveDataTask, 7 day default.
+ */
+ private int moveDataTaskSleep = 60 * 60 * 24 * 7;
+
+ /**
+ * Indicates when the next run of the move task is scheduled. The first run
+ * is scheduled by default at 01:00 hours.
+ */
+ private Calendar moveDataTaskNextRun = Calendar.getInstance();
+
+ /**
+ * It's possible to delay the delete operation on the primary data store
+ * while move task is running. The delete will be executed after defined
+ * delayDeleteSleep.
+ */
+ private boolean delayedDelete = false;
+
+ /**
+ * The sleep time in seconds to delay remove operation on the primary data
+ * store, 1 day default.
+ */
+ private long delayedDeleteSleep = 60 * 60 * 24;
+
/**
* File that holds the data identifiers if delayDelete is enabled.
*/
private FileSystemResource identifiersToDeleteFile = null;
- private Thread deleteDelayedIdentifiersTaskThread;
+ private Thread deleteDelayedIdentifiersTaskThread;
- /**
- * Name of the file which holds the identifiers if deleayed delete is enabled
+ /**
+ * Name of the file which holds the identifiers if delayed delete is
+ * enabled
*/
private final String IDENTIFIERS_TO_DELETE_FILE_KEY = "identifiersToDelete";
-
- /**
- * The delay time in milliseconds between scanning data records, 100 default.
- */
- private long sleepBetweenRecords = 100;
-
- {
- if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= 1) {
- moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1);
- }
- moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, 1);
- moveDataTaskNextRun.set(Calendar.MINUTE, 0);
- moveDataTaskNextRun.set(Calendar.SECOND, 0);
- moveDataTaskNextRun.set(Calendar.MILLISECOND, 0);
- }
-
- /**
- * Setter for the primary dataStore
- *
- * @param dataStore
- */
- public void setPrimaryDataStore(DataStore dataStore) {
- this.primaryDataStore = dataStore;
- }
-
- /**
- * Setter for the archive dataStore
- *
- * @param dataStore
- */
- public void setArchiveDataStore(DataStore dataStore) {
- this.archiveDataStore = dataStore;
- }
-
- /**
- * Check if a record for the given identifier exists in the primary data
- * store. If not found there it will be returned from the archive data
- * store. If no record exists, this method returns null.
- *
- * @param identifier
- * data identifier
- * @return the record if found, and null if not
- */
- public DataRecord getRecordIfStored(DataIdentifier identifier)
- throws DataStoreException {
- if (moveDataTaskRunning) {
- moveDataTaskLock.lock();
- }
- try {
- DataRecord dataRecord = primaryDataStore
- .getRecordIfStored(identifier);
- if (dataRecord == null) {
- dataRecord = archiveDataStore.getRecordIfStored(identifier);
- }
- return dataRecord;
- } finally {
- if (moveDataTaskRunning) {
- moveDataTaskLock.unlock();
- }
- }
- }
-
- /**
- * Returns the identified data record from the primary data store. If not
- * found there it will be returned from the archive data store. The given
- * identifier should be the identifier of a previously saved data record.
- * Since records are never removed, there should never be cases where the
- * identified record is not found. Abnormal cases like that are treated as
- * errors and handled by throwing an exception.
- *
- * @param identifier
- * data identifier
- * @return identified data record
- * @throws DataStoreException
- * if the data store could not be accessed, or if the given
- * identifier is invalid
- */
- public DataRecord getRecord(DataIdentifier identifier)
- throws DataStoreException {
- if (moveDataTaskRunning) {
- moveDataTaskLock.lock();
- }
- try {
- return primaryDataStore.getRecord(identifier);
- } catch (DataStoreException e) {
- return archiveDataStore.getRecord(identifier);
- } finally {
- if (moveDataTaskRunning) {
- moveDataTaskLock.unlock();
- }
- }
- }
-
- /**
- * Creates a new data record in the primary data store. The given binary
- * stream is consumed and a binary record containing the consumed stream is
- * created and returned. If the same stream already exists in another
- * record, then that record is returned instead of creating a new one.
- * <p>
- * The given stream is consumed and <strong>not closed</strong> by this
- * method. It is the responsibility of the caller to close the stream. A
- * typical call pattern would be:
- *
- * <pre>
- * InputStream stream = ...;
- * try {
- * record = store.addRecord(stream);
- * } finally {
- * stream.close();
- * }
- * </pre>
- *
- * @param stream
- * binary stream
- * @return data record that contains the given stream
- * @throws DataStoreException
- * if the data store could not be accessed
- */
- public DataRecord addRecord(InputStream stream) throws DataStoreException {
- return primaryDataStore.addRecord(stream);
- }
-
- /**
- * From now on, update the modified date of an object even when accessing it
- * in the archive data store. Usually, the modified date is only updated
- * when creating a new object, or when a new link is added to an existing
- * object. When this setting is enabled, even getLength() will update the
- * modified date.
- *
- * @param before
- * - update the modified date to the current time if it is older
- * than this value
- */
- public void updateModifiedDateOnAccess(long before) {
- archiveDataStore.updateModifiedDateOnAccess(before);
- }
-
- /**
- * Delete objects that have a modified date older than the specified date
- * from the archive data store.
- *
- * @param min
- * the minimum time
- * @return the number of data records deleted
- * @throws DataStoreException
- */
- public int deleteAllOlderThan(long min) throws DataStoreException {
- return archiveDataStore.deleteAllOlderThan(min);
- }
-
- /**
- * Get all identifiers from the archive data store.
- *
- * @return an iterator over all DataIdentifier objects
- * @throws DataStoreException
- * if the list could not be read
- */
- public Iterator<DataIdentifier> getAllIdentifiers()
- throws DataStoreException {
- return archiveDataStore.getAllIdentifiers();
- }
-
- public void init(String homeDir) throws RepositoryException {
- if (delayedDelete) {
- // First initialize the identifiersToDeleteFile
- LocalFileSystem fileSystem = new LocalFileSystem();
- fileSystem.setRoot(new File(homeDir));
- identifiersToDeleteFile = new FileSystemResource(fileSystem, FileSystem.SEPARATOR + IDENTIFIERS_TO_DELETE_FILE_KEY);
- }
- moveDataTaskThread = new Thread(new MoveDataTask(), "Jackrabbit-MulitDataStore-MoveDataTaskThread");
- moveDataTaskThread.setDaemon(true);
- moveDataTaskThread.start();
- log.info("MultiDataStore-MoveDataTask thread started; first run scheduled at " + moveDataTaskNextRun.getTime());
- if (delayedDelete) {
- try {
- // Run on startup the DeleteDelayedIdentifiersTask only if the file exists and modify date is older than the delayedDeleteSleep timeout ...
- if (identifiersToDeleteFile != null && identifiersToDeleteFile.exists() && (identifiersToDeleteFile.lastModified() + (delayedDeleteSleep * 1000)) < System.currentTimeMillis()) {
- deleteDelayedIdentifiersTaskThread = new Thread(new DeleteDelayedIdentifiersTask(), "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread");
- deleteDelayedIdentifiersTaskThread.setDaemon(true);
- deleteDelayedIdentifiersTaskThread.start();
- log.info("Old entries in the " + IDENTIFIERS_TO_DELETE_FILE_KEY + " File found. DeleteDelayedIdentifiersTask-Thread started now.");
- }
- } catch (FileSystemException e) {
- throw new RepositoryException("I/O error while reading from '"
- + identifiersToDeleteFile.getPath() + "'", e);
- }
- }
- }
-
- /**
- * Get the minimum size of an object that should be stored in the primary
- * data store.
- *
- * @return the minimum size in bytes
- */
- public int getMinRecordLength() {
- return primaryDataStore.getMinRecordLength();
- }
-
- public void close() throws DataStoreException {
- DataStoreException lastException = null;
- // 1. close the primary data store
- try {
- primaryDataStore.close();
- } catch (DataStoreException e) {
- lastException = e;
- }
- // 2. close the archive data store
- try {
- archiveDataStore.close();
- } catch (DataStoreException e) {
- if (lastException != null) {
- lastException = new DataStoreException(lastException);
- }
- }
- // 3. if moveDataTaskThread is running interrupt it
- try {
- if (moveDataTaskRunning) {
- moveDataTaskThread.interrupt();
- }
- } catch (Exception e) {
- if (lastException != null) {
- lastException = new DataStoreException(lastException);
- }
- }
- // 4. if deleteDelayedIdentifiersTaskThread is running interrupt it
- try {
- if (deleteDelayedIdentifiersTaskThread != null && deleteDelayedIdentifiersTaskThread.isAlive()) {
- deleteDelayedIdentifiersTaskThread.interrupt();
- }
- } catch (Exception e) {
- if (lastException != null) {
- lastException = new DataStoreException(lastException);
- }
- }
- if (lastException != null) {
- throw lastException;
- }
- }
-
- public void clearInUse() {
- archiveDataStore.clearInUse();
- }
-
- public int getMaxAge() {
- return maxAge;
- }
-
- public void setMaxAge(int maxAge) {
- this.maxAge = maxAge;
- }
-
- public int getMoveDataTaskSleep() {
- return moveDataTaskSleep;
- }
-
- public int getMoveDataTaskFirstRunHourOfDay() {
- return moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY);
- }
-
- public void setMoveDataTaskSleep(int sleep) {
- this.moveDataTaskSleep = sleep;
- }
-
- public void setMoveDataTaskFirstRunHourOfDay(int hourOfDay) {
- moveDataTaskNextRun = Calendar.getInstance();
- if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= hourOfDay) {
- moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1);
- }
- moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, hourOfDay);
- moveDataTaskNextRun.set(Calendar.MINUTE, 0);
- moveDataTaskNextRun.set(Calendar.SECOND, 0);
- moveDataTaskNextRun.set(Calendar.MILLISECOND, 0);
- }
-
- public void setSleepBetweenRecords(long millis) {
- this.sleepBetweenRecords = millis;
- }
-
- public long getSleepBetweenRecords() {
- return sleepBetweenRecords;
- }
-
- public boolean isDelayedDelete() {
- return delayedDelete;
- }
-
- public void setDelayedDelete(boolean delayedDelete) {
- this.delayedDelete = delayedDelete;
- }
-
- public long getDelayedDeleteSleep() {
- return delayedDeleteSleep;
- }
-
- public void setDelayedDeleteSleep(long delayedDeleteSleep) {
- this.delayedDeleteSleep = delayedDeleteSleep;
- }
-
- /**
- * Writes the given DataIdentifier to the delayedDeletedFile.
- *
- * @param identifier
- * @return boolean true if it was successful otherwise false
- */
- private boolean writeDelayedDataIdentifier(DataIdentifier identifier) {
- BufferedWriter writer = null;
- try {
- File identifierFile = new File(((LocalFileSystem)identifiersToDeleteFile.getFileSystem()).getPath(),
- identifiersToDeleteFile.getPath());
- writer = new BufferedWriter(
- new FileWriter(identifierFile, true));
- return true;
- } catch (Exception e) {
- log.warn("I/O error while saving DataIdentifier (stacktrace on DEBUG log level) to '"
- + identifiersToDeleteFile.getPath() + "': " + e.getMessage());
- log.debug("Root cause: ", e);
- return false;
- } finally {
- IOUtils.closeQuietly(writer);
- }
- }
-
- /**
- * Purges the delayedDeletedFile.
- * @return boolean true if it was successful otherwise false
- */
- private boolean purgeDelayedDeleteFile() {
- BufferedWriter writer = null;
- try {
- writer = new BufferedWriter(
- new OutputStreamWriter(identifiersToDeleteFile.getOutputStream()));
- writer.write("");
- return true;
- } catch (Exception e) {
- log.warn("I/O error while purging (stacktrace on DEBUG log level) the " + IDENTIFIERS_TO_DELETE_FILE_KEY + " file '"
- + identifiersToDeleteFile.getPath() + "': " + e.getMessage());
- log.debug("Root cause: ", e);
- return false;
- } finally {
- IOUtils.closeQuietly(writer);
- }
- }
-
- /**
- * Class for maintaining the MultiDataStore. It will be used to move the
- * content of the primary data store to the archive data store.
- */
- public class MoveDataTask implements Runnable {
-
- /**
- * {@inheritDoc}
- */
- public void run() {
- while (!Thread.currentThread().isInterrupted()) {
- try {
- log.info("Next move-data task run scheduled at " + moveDataTaskNextRun.getTime());
- long sleepTime = moveDataTaskNextRun.getTimeInMillis() - System.currentTimeMillis();
- if (sleepTime > 0) {
- Thread.sleep(sleepTime);
- }
- moveDataTaskRunning = true;
- moveOutdatedData();
- moveDataTaskRunning = false;
- moveDataTaskNextRun.add(Calendar.SECOND, moveDataTaskSleep);
- if (delayedDelete) {
- if (deleteDelayedIdentifiersTaskThread != null && deleteDelayedIdentifiersTaskThread.isAlive()) {
- log.warn("The DeleteDelayedIdentifiersTask-Thread is already running.");
- } else {
- deleteDelayedIdentifiersTaskThread = new Thread(new DeleteDelayedIdentifiersTask(), "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread");
- deleteDelayedIdentifiersTaskThread.setDaemon(true);
- deleteDelayedIdentifiersTaskThread.start();
- }
- }
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- }
- }
- log.warn("Interrupted: stopping move-data task.");
- }
-
- /**
- * Moves outdated data from primary to archive data store
- */
- protected void moveOutdatedData() {
- try {
- long now = System.currentTimeMillis();
- long maxAgeMilli = 1000 * 60 * 60 * 24 * maxAge;
- Iterator<DataIdentifier> allIdentifiers = primaryDataStore.getAllIdentifiers();
- int moved = 0;
- while (allIdentifiers.hasNext()) {
- DataIdentifier identifier = allIdentifiers.next();
- DataRecord dataRecord = primaryDataStore.getRecord(identifier);
- if ((dataRecord.getLastModified() + maxAgeMilli) < now) {
- try {
- moveDataTaskLock.lock();
- if (delayedDelete) {
- // first write it to the file and then add it to the archive data store ...
- if (writeDelayedDataIdentifier(identifier)) {
- archiveDataStore.addRecord(dataRecord.getStream());
- moved++;
- }
- } else {
- // first add it and then delete it .. not really atomic ...
- archiveDataStore.addRecord(dataRecord.getStream());
- ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier);
- moved++;
- }
- } catch (DataStoreException e) {
- log.error("Failed to move DataRecord. DataIdentifier: " + identifier, e);
- } finally {
- moveDataTaskLock.unlock();
- }
- }
- // Give other threads time to use the MultiDataStore while
- // MoveDataTask is running..
- Thread.sleep(sleepBetweenRecords);
- }
- if (delayedDelete) {
- log.info("Moved " + moved + " DataRecords to the archive data store. The DataRecords in the primary data store will be removed in " + delayedDeleteSleep +" seconds.");
- } else {
- log.info("Moved " + moved + " DataRecords to the archive data store.");
- }
- } catch (Exception e) {
- log.warn("Failed to run move-data task.", e);
- }
- }
- }
-
- /**
- * Class to clean up the delayed DataRecords from the primary data store.
- */
- public class DeleteDelayedIdentifiersTask implements Runnable {
-
- boolean run = true;
-
- @Override
- public void run() {
- if (moveDataTaskRunning) {
- log.warn("It's not supported to run the DeleteDelayedIdentifiersTask while the MoveDataTask is running.");
- return;
- }
- while (run && !Thread.currentThread().isInterrupted()) {
- BufferedReader reader = null;
- ArrayList<DataIdentifier> problemIdentifiers = new ArrayList<DataIdentifier>();
- try {
- int deleted = 0;
- reader = new BufferedReader(
- new InputStreamReader(identifiersToDeleteFile.getInputStream()));
- while (true) {
- String s = reader.readLine();
- if (s == null || s.equals("")) {
- break;
- }
- DataIdentifier identifier = new DataIdentifier(s);
- try {
- moveDataTaskLock.lock();
- ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier);
- deleted++;
- } catch (DataStoreException e) {
- log.error("Failed to delete DataRecord. DataIdentifier: " + identifier, e);
- problemIdentifiers.add(identifier);
- } finally {
- moveDataTaskLock.unlock();
- }
- // Give other threads time to use the MultiDataStore while
- // DeleteDelayedIdentifiersTask is running..
- Thread.sleep(sleepBetweenRecords);
- }
- log.info("Deleted " + deleted+ " DataRecords from the primary data store.");
- if (problemIdentifiers.isEmpty()) {
- try {
- identifiersToDeleteFile.delete();
- } catch (FileSystemException e) {
- log.warn("Unable to delete the " + IDENTIFIERS_TO_DELETE_FILE_KEY + " File.");
- if (!purgeDelayedDeleteFile()) {
- log.error("Unable to purge the " + IDENTIFIERS_TO_DELETE_FILE_KEY + " File.");
- }
- }
- } else {
- if (purgeDelayedDeleteFile()) {
- for (int x = 0; x < problemIdentifiers.size(); x++) {
- writeDelayedDataIdentifier(problemIdentifiers.get(x));
- }
- }
- }
- } catch (InterruptedException e) {
- log.warn("Interrupted: stopping delayed-delete task.");
- Thread.currentThread().interrupt();
- } catch (Exception e) {
- log.warn("Failed to run delayed-delete task.", e);
- } finally {
- IOUtils.closeQuietly(reader);
- run = false;
- }
- }
- }
- }
    /**
     * The delay time in milliseconds between scanning data records, 100
     * default.
     */
    private long sleepBetweenRecords = 100;

    {
        // Instance initializer: schedule the first MoveDataTask run at the
        // next 01:00 local time. If it is already 01:00 or later today, the
        // first run is pushed to tomorrow. moveDataTaskNextRun is a Calendar
        // field declared elsewhere in this class; this block only normalizes
        // it to an exact 01:00:00.000 boundary.
        if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= 1) {
            moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1);
        }
        moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, 1);
        moveDataTaskNextRun.set(Calendar.MINUTE, 0);
        moveDataTaskNextRun.set(Calendar.SECOND, 0);
        moveDataTaskNextRun.set(Calendar.MILLISECOND, 0);
    }
+
+ /**
+ * Setter for the primary dataStore
+ *
+ * @param dataStore
+ */
+ public void setPrimaryDataStore(DataStore dataStore) {
+ this.primaryDataStore = dataStore;
+ }
+
+ /**
+ * Setter for the archive dataStore
+ *
+ * @param dataStore
+ */
+ public void setArchiveDataStore(DataStore dataStore) {
+ this.archiveDataStore = dataStore;
+ }
+
+ /**
+ * Check if a record for the given identifier exists in the primary data
+ * store. If not found there it will be returned from the archive data
+ * store. If no record exists, this method returns null.
+ *
+ * @param identifier
+ * data identifier
+ * @return the record if found, and null if not
+ */
+ public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException {
+ if (moveDataTaskRunning) {
+ moveDataTaskLock.lock();
+ }
+ try {
+ DataRecord dataRecord = primaryDataStore.getRecordIfStored(identifier);
+ if (dataRecord == null) {
+ dataRecord = archiveDataStore.getRecordIfStored(identifier);
+ }
+ return dataRecord;
+ } finally {
+ if (moveDataTaskRunning) {
+ moveDataTaskLock.unlock();
+ }
+ }
+ }
+
+ /**
+ * Returns the identified data record from the primary data store. If not
+ * found there it will be returned from the archive data store. The given
+ * identifier should be the identifier of a previously saved data record.
+ * Since records are never removed, there should never be cases where the
+ * identified record is not found. Abnormal cases like that are treated as
+ * errors and handled by throwing an exception.
+ *
+ * @param identifier
+ * data identifier
+ * @return identified data record
+ * @throws DataStoreException
+ * if the data store could not be accessed, or if the given
+ * identifier is invalid
+ */
+ public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException {
+ if (moveDataTaskRunning) {
+ moveDataTaskLock.lock();
+ }
+ try {
+ return primaryDataStore.getRecord(identifier);
+ } catch (DataStoreException e) {
+ return archiveDataStore.getRecord(identifier);
+ } finally {
+ if (moveDataTaskRunning) {
+ moveDataTaskLock.unlock();
+ }
+ }
+ }
+
+ /**
+ * Creates a new data record in the primary data store. The given binary
+ * stream is consumed and a binary record containing the consumed stream is
+ * created and returned. If the same stream already exists in another
+ * record, then that record is returned instead of creating a new one.
+ * <p>
+ * The given stream is consumed and <strong>not closed</strong> by this
+ * method. It is the responsibility of the caller to close the stream. A
+ * typical call pattern would be:
+ *
+ * <pre>
+ * InputStream stream = ...;
+ * try {
+ * record = store.addRecord(stream);
+ * } finally {
+ * stream.close();
+ * }
+ * </pre>
+ *
+ * @param stream
+ * binary stream
+ * @return data record that contains the given stream
+ * @throws DataStoreException
+ * if the data store could not be accessed
+ */
+ public DataRecord addRecord(InputStream stream) throws DataStoreException {
+ return primaryDataStore.addRecord(stream);
+ }
+
+ /**
+ * From now on, update the modified date of an object even when accessing it
+ * in the archive data store. Usually, the modified date is only updated
+ * when creating a new object, or when a new link is added to an existing
+ * object. When this setting is enabled, even getLength() will update the
+ * modified date.
+ *
+ * @param before
+ * - update the modified date to the current time if it is older
+ * than this value
+ */
+ public void updateModifiedDateOnAccess(long before) {
+ archiveDataStore.updateModifiedDateOnAccess(before);
+ }
+
+ /**
+ * Delete objects that have a modified date older than the specified date
+ * from the archive data store.
+ *
+ * @param min
+ * the minimum time
+ * @return the number of data records deleted
+ * @throws DataStoreException
+ */
+ public int deleteAllOlderThan(long min) throws DataStoreException {
+ return archiveDataStore.deleteAllOlderThan(min);
+ }
+
+ /**
+ * Get all identifiers from the archive data store.
+ *
+ * @return an iterator over all DataIdentifier objects
+ * @throws DataStoreException
+ * if the list could not be read
+ */
+ public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
+ return archiveDataStore.getAllIdentifiers();
+ }
+
+ public void init(String homeDir) throws RepositoryException {
+ if (delayedDelete) {
+ // First initialize the identifiersToDeleteFile
+ LocalFileSystem fileSystem = new LocalFileSystem();
+ fileSystem.setRoot(new File(homeDir));
+ identifiersToDeleteFile = new FileSystemResource(fileSystem, FileSystem.SEPARATOR
+ + IDENTIFIERS_TO_DELETE_FILE_KEY);
+ }
+ moveDataTaskThread = new Thread(new MoveDataTask(),
+ "Jackrabbit-MulitDataStore-MoveDataTaskThread");
+ moveDataTaskThread.setDaemon(true);
+ moveDataTaskThread.start();
+ log.info("MultiDataStore-MoveDataTask thread started; first run scheduled at "
+ + moveDataTaskNextRun.getTime());
+ if (delayedDelete) {
+ try {
+ // Run on startup the DeleteDelayedIdentifiersTask only if the
+ // file exists and modify date is older than the
+ // delayedDeleteSleep timeout ...
+ if (identifiersToDeleteFile != null
+ && identifiersToDeleteFile.exists()
+ && (identifiersToDeleteFile.lastModified() + (delayedDeleteSleep * 1000)) < System
+ .currentTimeMillis()) {
+ deleteDelayedIdentifiersTaskThread = new Thread(
+ new DeleteDelayedIdentifiersTask(),
+ "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread");
+ deleteDelayedIdentifiersTaskThread.setDaemon(true);
+ deleteDelayedIdentifiersTaskThread.start();
+ log.info("Old entries in the " + IDENTIFIERS_TO_DELETE_FILE_KEY
+ + " File found. DeleteDelayedIdentifiersTask-Thread started now.");
+ }
+ } catch (FileSystemException e) {
+ throw new RepositoryException("I/O error while reading from '"
+ + identifiersToDeleteFile.getPath() + "'", e);
+ }
+ }
+ }
+
+ /**
+ * Get the minimum size of an object that should be stored in the primary
+ * data store.
+ *
+ * @return the minimum size in bytes
+ */
+ public int getMinRecordLength() {
+ return primaryDataStore.getMinRecordLength();
+ }
+
+ public void close() throws DataStoreException {
+ DataStoreException lastException = null;
+ // 1. close the primary data store
+ try {
+ primaryDataStore.close();
+ } catch (DataStoreException e) {
+ lastException = e;
+ }
+ // 2. close the archive data store
+ try {
+ archiveDataStore.close();
+ } catch (DataStoreException e) {
+ if (lastException != null) {
+ lastException = new DataStoreException(lastException);
+ }
+ }
+ // 3. if moveDataTaskThread is running interrupt it
+ try {
+ if (moveDataTaskRunning) {
+ moveDataTaskThread.interrupt();
+ }
+ } catch (Exception e) {
+ if (lastException != null) {
+ lastException = new DataStoreException(lastException);
+ }
+ }
+ // 4. if deleteDelayedIdentifiersTaskThread is running interrupt it
+ try {
+ if (deleteDelayedIdentifiersTaskThread != null
+ && deleteDelayedIdentifiersTaskThread.isAlive()) {
+ deleteDelayedIdentifiersTaskThread.interrupt();
+ }
+ } catch (Exception e) {
+ if (lastException != null) {
+ lastException = new DataStoreException(lastException);
+ }
+ }
+ if (lastException != null) {
+ throw lastException;
+ }
+ }
+
+ public void clearInUse() {
+ archiveDataStore.clearInUse();
+ }
+
    /**
     * @return the maximum age in days a record may stay in the primary data
     *         store before the MoveDataTask moves it to the archive
     */
    public int getMaxAge() {
        return maxAge;
    }

    /**
     * @param maxAge maximum record age in days before it is moved
     */
    public void setMaxAge(int maxAge) {
        this.maxAge = maxAge;
    }

    /**
     * @return the interval in seconds between two MoveDataTask runs
     */
    public int getMoveDataTaskSleep() {
        return moveDataTaskSleep;
    }

    /**
     * @return the hour of day (0-23) of the next scheduled MoveDataTask run
     */
    public int getMoveDataTaskFirstRunHourOfDay() {
        return moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY);
    }

    /**
     * @param sleep interval in seconds between two MoveDataTask runs
     */
    public void setMoveDataTaskSleep(int sleep) {
        this.moveDataTaskSleep = sleep;
    }

    /**
     * Schedules the first MoveDataTask run at the given hour of the current
     * day, or of the following day if that hour has already passed.
     *
     * @param hourOfDay hour of day (0-23) for the first run
     */
    public void setMoveDataTaskFirstRunHourOfDay(int hourOfDay) {
        moveDataTaskNextRun = Calendar.getInstance();
        if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= hourOfDay) {
            moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1);
        }
        moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, hourOfDay);
        moveDataTaskNextRun.set(Calendar.MINUTE, 0);
        moveDataTaskNextRun.set(Calendar.SECOND, 0);
        moveDataTaskNextRun.set(Calendar.MILLISECOND, 0);
    }

    /**
     * @param millis delay in milliseconds between scanning two data records
     */
    public void setSleepBetweenRecords(long millis) {
        this.sleepBetweenRecords = millis;
    }

    /**
     * @return the delay in milliseconds between scanning two data records
     */
    public long getSleepBetweenRecords() {
        return sleepBetweenRecords;
    }

    /**
     * @return true if records are deleted from the primary store only after a
     *         delay, via the DeleteDelayedIdentifiersTask
     */
    public boolean isDelayedDelete() {
        return delayedDelete;
    }

    /**
     * @param delayedDelete enable or disable delayed-delete mode
     */
    public void setDelayedDelete(boolean delayedDelete) {
        this.delayedDelete = delayedDelete;
    }

    /**
     * @return the delay in seconds before delayed identifiers are deleted
     */
    public long getDelayedDeleteSleep() {
        return delayedDeleteSleep;
    }

    /**
     * @param delayedDeleteSleep delay in seconds before delayed identifiers
     *            are deleted
     */
    public void setDelayedDeleteSleep(long delayedDeleteSleep) {
        this.delayedDeleteSleep = delayedDeleteSleep;
    }
+
+ /**
+ * Writes the given DataIdentifier to the delayedDeletedFile.
+ *
+ * @param identifier
+ * @return boolean true if it was successful otherwise false
+ */
+ private boolean writeDelayedDataIdentifier(DataIdentifier identifier) {
+ BufferedWriter writer = null;
+ try {
+ File identifierFile = new File(
+ ((LocalFileSystem) identifiersToDeleteFile.getFileSystem()).getPath(),
+ identifiersToDeleteFile.getPath());
+ writer = new BufferedWriter(new FileWriter(identifierFile, true));
+ return true;
+ } catch (Exception e) {
+ log.warn("I/O error while saving DataIdentifier (stacktrace on DEBUG log level) to '"
+ + identifiersToDeleteFile.getPath() + "': " + e.getMessage());
+ log.debug("Root cause: ", e);
+ return false;
+ } finally {
+ IOUtils.closeQuietly(writer);
+ }
+ }
+
+ /**
+ * Purges the delayedDeletedFile.
+ *
+ * @return boolean true if it was successful otherwise false
+ */
+ private boolean purgeDelayedDeleteFile() {
+ BufferedWriter writer = null;
+ try {
+ writer = new BufferedWriter(new OutputStreamWriter(
+ identifiersToDeleteFile.getOutputStream()));
+ writer.write("");
+ return true;
+ } catch (Exception e) {
+ log.warn("I/O error while purging (stacktrace on DEBUG log level) the "
+ + IDENTIFIERS_TO_DELETE_FILE_KEY + " file '"
+ + identifiersToDeleteFile.getPath() + "': " + e.getMessage());
+ log.debug("Root cause: ", e);
+ return false;
+ } finally {
+ IOUtils.closeQuietly(writer);
+ }
+ }
+
+ /**
+ * Class for maintaining the MultiDataStore. It will be used to move the
+ * content of the primary data store to the archive data store.
+ */
+ public class MoveDataTask implements Runnable {
+
+ /**
+ * {@inheritDoc}
+ */
+ public void run() {
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ log.info("Next move-data task run scheduled at "
+ + moveDataTaskNextRun.getTime());
+ long sleepTime = moveDataTaskNextRun.getTimeInMillis()
+ - System.currentTimeMillis();
+ if (sleepTime > 0) {
+ Thread.sleep(sleepTime);
+ }
+ moveDataTaskRunning = true;
+ moveOutdatedData();
+ moveDataTaskRunning = false;
+ moveDataTaskNextRun.add(Calendar.SECOND, moveDataTaskSleep);
+ if (delayedDelete) {
+ if (deleteDelayedIdentifiersTaskThread != null
+ && deleteDelayedIdentifiersTaskThread.isAlive()) {
+ log.warn("The DeleteDelayedIdentifiersTask-Thread is already running.");
+ } else {
+ deleteDelayedIdentifiersTaskThread = new Thread(
+ new DeleteDelayedIdentifiersTask(),
+ "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread");
+ deleteDelayedIdentifiersTaskThread.setDaemon(true);
+ deleteDelayedIdentifiersTaskThread.start();
+ }
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ log.warn("Interrupted: stopping move-data task.");
+ }
+
+ /**
+ * Moves outdated data from primary to archive data store
+ */
+ protected void moveOutdatedData() {
+ try {
+ long now = System.currentTimeMillis();
+ long maxAgeMilli = 1000 * 60 * 60 * 24 * maxAge;
+ Iterator<DataIdentifier> allIdentifiers = primaryDataStore.getAllIdentifiers();
+ int moved = 0;
+ while (allIdentifiers.hasNext()) {
+ DataIdentifier identifier = allIdentifiers.next();
+ DataRecord dataRecord = primaryDataStore.getRecord(identifier);
+ if ((dataRecord.getLastModified() + maxAgeMilli) < now) {
+ try {
+ moveDataTaskLock.lock();
+ if (delayedDelete) {
+ // first write it to the file and then add it to
+ // the archive data store ...
+ if (writeDelayedDataIdentifier(identifier)) {
+ archiveDataStore.addRecord(dataRecord.getStream());
+ moved++;
+ }
+ } else {
+ // first add it and then delete it .. not really
+ // atomic ...
+ archiveDataStore.addRecord(dataRecord.getStream());
+ ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier);
+ moved++;
+ }
+ } catch (DataStoreException e) {
+ log.error("Failed to move DataRecord. DataIdentifier: " + identifier, e);
+ } finally {
+ moveDataTaskLock.unlock();
+ }
+ }
+ // Give other threads time to use the MultiDataStore while
+ // MoveDataTask is running..
+ Thread.sleep(sleepBetweenRecords);
+ }
+ if (delayedDelete) {
+ log.info("Moved "
+ + moved
+ + " DataRecords to the archive data store. The DataRecords in the primary data store will be removed in "
+ + delayedDeleteSleep + " seconds.");
+ } else {
+ log.info("Moved " + moved + " DataRecords to the archive data store.");
+ }
+ } catch (Exception e) {
+ log.warn("Failed to run move-data task.", e);
+ }
+ }
+ }
+
    /**
     * Class to clean up the delayed DataRecords from the primary data store.
     * Reads identifiers line by line from the identifiersToDeleteFile, deletes
     * the corresponding records from the primary store, and then either
     * removes the file (all deletions succeeded) or rewrites it with only the
     * identifiers whose deletion failed.
     */
    public class DeleteDelayedIdentifiersTask implements Runnable {

        // One-shot flag: set to false in the finally block below, so each
        // thread start performs at most one full pass over the file.
        boolean run = true;

        @Override
        public void run() {
            // running concurrently with the MoveDataTask is unsupported
            if (moveDataTaskRunning) {
                log.warn("It's not supported to run the DeleteDelayedIdentifiersTask while the MoveDataTask is running.");
                return;
            }
            while (run && !Thread.currentThread().isInterrupted()) {
                BufferedReader reader = null;
                ArrayList<DataIdentifier> problemIdentifiers = new ArrayList<DataIdentifier>();
                try {
                    int deleted = 0;
                    reader = new BufferedReader(new InputStreamReader(
                            identifiersToDeleteFile.getInputStream()));
                    while (true) {
                        String s = reader.readLine();
                        // NOTE(review): an empty line terminates the scan, not
                        // just end-of-file — identifiers after a blank line
                        // would be skipped; confirm the writer never emits one
                        if (s == null || s.equals("")) {
                            break;
                        }
                        DataIdentifier identifier = new DataIdentifier(s);
                        try {
                            // serialize each deletion against concurrent readers
                            moveDataTaskLock.lock();
                            ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier);
                            deleted++;
                        } catch (DataStoreException e) {
                            // remember the identifier so it can be re-queued below
                            log.error("Failed to delete DataRecord. DataIdentifier: " + identifier,
                                    e);
                            problemIdentifiers.add(identifier);
                        } finally {
                            moveDataTaskLock.unlock();
                        }
                        // Give other threads time to use the MultiDataStore
                        // while the DeleteDelayedIdentifiersTask is running..
                        Thread.sleep(sleepBetweenRecords);
                    }
                    log.info("Deleted " + deleted + " DataRecords from the primary data store.");
                    if (problemIdentifiers.isEmpty()) {
                        // everything deleted: remove the file, falling back to
                        // purging its content if the delete itself fails
                        try {
                            identifiersToDeleteFile.delete();
                        } catch (FileSystemException e) {
                            log.warn("Unable to delete the " + IDENTIFIERS_TO_DELETE_FILE_KEY
                                    + " File.");
                            if (!purgeDelayedDeleteFile()) {
                                log.error("Unable to purge the " + IDENTIFIERS_TO_DELETE_FILE_KEY
                                        + " File.");
                            }
                        }
                    } else {
                        // rewrite the file so it only lists the identifiers
                        // whose deletion failed
                        if (purgeDelayedDeleteFile()) {
                            for (int x = 0; x < problemIdentifiers.size(); x++) {
                                writeDelayedDataIdentifier(problemIdentifiers.get(x));
                            }
                        }
                    }
                } catch (InterruptedException e) {
                    log.warn("Interrupted: stopping delayed-delete task.");
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    log.warn("Failed to run delayed-delete task.", e);
                } finally {
                    IOUtils.closeQuietly(reader);
                    run = false;
                }
            }
        }
    }
}
Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java?rev=1414155&r1=1414154&r2=1414155&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java Tue Nov 27 12:39:14 2012
@@ -20,18 +20,20 @@ import org.apache.jackrabbit.core.data.M
/**
* To use a DataStore within a MultiDataStore it must implement this
- * MultiDataStoreAware Interface. It extends a DataStore to delete a
- * single DataRecord.
+ * MultiDataStoreAware Interface. It extends a DataStore to delete a single
+ * DataRecord.
*/
public interface MultiDataStoreAware {
- /**
- * Deletes a single DataRecord based on the given identifier. Delete
- * will only be used by the {@link MoveDataTask}.
- *
- * @param identifier data identifier
- * @throws DataStoreException if the data store could not be accessed,
- * or if the given identifier is invalid
+ /**
+ * Deletes a single DataRecord based on the given identifier. Delete will
+ * only be used by the {@link MoveDataTask}.
+ *
+ * @param identifier
+ * data identifier
+ * @throws DataStoreException
+ * if the data store could not be accessed, or if the given
+ * identifier is invalid
*/
void deleteRecord(DataIdentifier identifier) throws DataStoreException;