Posted to oak-commits@jackrabbit.apache.org by al...@apache.org on 2014/06/03 10:34:11 UTC
svn commit: r1599447 - in /jackrabbit/oak/branches/1.0: ./
oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/
oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/
oak-core/src/main/java/org/apache/jackrabbit/oak/spi/sta...
Author: alexparvulescu
Date: Tue Jun 3 08:34:10 2014
New Revision: 1599447
URL: http://svn.apache.org/r1599447
Log:
OAK-1804 TarMK compaction
- merged to 1.0 branch
Added:
jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java
- copied, changed from r1598352, jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java
Modified:
jackrabbit/oak/branches/1.0/ (props changed)
jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java
jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java
jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java
jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java
jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/state/ApplyDiff.java
jackrabbit/oak/branches/1.0/oak-run/README.md
jackrabbit/oak/branches/1.0/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java
Propchange: jackrabbit/oak/branches/1.0/
------------------------------------------------------------------------------
Merged /jackrabbit/oak/trunk:r1597795,1597854,1597860,1598292,1598352,1598369,1598595,1598631,1598696,1598732,1598798
Copied: jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java (from r1598352, jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java)
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java?p2=jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java&p1=jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java&r1=1598352&r2=1599447&rev=1599447&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java (original)
+++ jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java Tue Jun 3 08:34:10 2014
@@ -16,12 +16,14 @@
*/
package org.apache.jackrabbit.oak.plugins.segment;
+import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Maps.newHashMap;
import static org.apache.jackrabbit.oak.api.Type.BINARIES;
import static org.apache.jackrabbit.oak.api.Type.BINARY;
import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
import java.io.IOException;
+import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -29,6 +31,7 @@ import java.util.Map;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.commons.IOUtils;
import org.apache.jackrabbit.oak.plugins.memory.BinaryPropertyState;
import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
import org.apache.jackrabbit.oak.plugins.memory.MultiBinaryPropertyState;
@@ -39,7 +42,7 @@ import org.apache.jackrabbit.oak.spi.sta
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.esotericsoftware.minlog.Log;
+import com.google.common.hash.Hashing;
/**
* Tool for compacting segments.
@@ -49,31 +52,16 @@ public class Compactor {
/** Logger instance */
private static final Logger log = LoggerFactory.getLogger(Compactor.class);
- private final SegmentStore store;
+ public static void compact(SegmentStore store) {
+ SegmentWriter writer = store.getTracker().getWriter();
+ Compactor compactor = new Compactor(writer);
- private final SegmentWriter writer;
-
- /**
- * Map from the identifiers of old records to the identifiers of their
- * compacted copies. Used to prevent the compaction code from duplicating
- * things like checkpoints that share most of their content with other
- * subtrees.
- */
- private final Map<RecordId, RecordId> compacted = newHashMap();
-
- public Compactor(SegmentStore store) {
- this.store = store;
- this.writer = store.getTracker().getWriter();
- }
-
- public void compact() throws IOException {
log.debug("TarMK compaction");
SegmentNodeBuilder builder = writer.writeNode(EMPTY_NODE).builder();
SegmentNodeState before = store.getHead();
EmptyNodeState.compareAgainstEmptyState(
- before, new CompactDiff(builder));
- System.out.println(compacted.size() + " nodes compacted");
+ before, compactor.newCompactDiff(builder));
SegmentNodeState after = builder.getNodeState();
while (!store.setHead(before, after)) {
@@ -81,13 +69,38 @@ public class Compactor {
// Rebase (and compact) those changes on top of the
// compacted state before retrying to set the head.
SegmentNodeState head = store.getHead();
- head.compareAgainstBaseState(before, new CompactDiff(builder));
- System.out.println(compacted.size() + " nodes compacted");
+ head.compareAgainstBaseState(
+ before, compactor.newCompactDiff(builder));
before = head;
after = builder.getNodeState();
}
}
+ private final SegmentWriter writer;
+
+ /**
+ * Map from the identifiers of old records to the identifiers of their
+ * compacted copies. Used to prevent the compaction code from duplicating
+ * things like checkpoints that share most of their content with other
+ * subtrees.
+ */
+ private final Map<RecordId, RecordId> compacted = newHashMap();
+
+ /**
+ * Map from {@link #getBlobKey(Blob) blob keys} to matching compacted
+ * blob record identifiers. Used to de-duplicate copies of the same
+ * binary values.
+ */
+ private final Map<String, List<RecordId>> binaries = newHashMap();
+
+ private Compactor(SegmentWriter writer) {
+ this.writer = writer;
+ }
+
+ private CompactDiff newCompactDiff(NodeBuilder builder) {
+ return new CompactDiff(builder);
+ }
+
private class CompactDiff extends ApplyDiff {
CompactDiff(NodeBuilder builder) {
@@ -122,7 +135,7 @@ public class Compactor {
boolean success = EmptyNodeState.compareAgainstEmptyState(
after, new CompactDiff(child));
- if (success && id != null) {
+ if (success && id != null && child.getChildNodeCount(1) > 0) {
RecordId compactedId =
writer.writeNode(child.getNodeState()).getRecordId();
compacted.put(id, compactedId);
@@ -148,7 +161,7 @@ public class Compactor {
boolean success = after.compareAgainstBaseState(
before, new CompactDiff(child));
- if (success && id != null) {
+ if (success && id != null && child.getChildNodeCount(1) > 0) {
RecordId compactedId =
writer.writeNode(child.getNodeState()).getRecordId();
compacted.put(id, compactedId);
@@ -177,15 +190,69 @@ public class Compactor {
}
}
+ /**
+ * Compacts (and de-duplicates) the given blob.
+ *
+ * @param blob blob to be compacted
+ * @return compacted blob
+ */
private Blob compact(Blob blob) {
if (blob instanceof SegmentBlob) {
+ SegmentBlob sb = (SegmentBlob) blob;
+
try {
- return ((SegmentBlob) blob).clone(writer);
+ // if the blob is inlined or external, just clone it
+ if (sb.isExternal() || sb.length() < Segment.MEDIUM_LIMIT) {
+ return sb.clone(writer);
+ }
+
+ // else check if we've already cloned this specific record
+ RecordId id = sb.getRecordId();
+ RecordId compactedId = compacted.get(id);
+ if (compactedId != null) {
+ return new SegmentBlob(compactedId);
+ }
+
+ // alternatively, check whether the exact same binary has already been cloned
+ String key = getBlobKey(blob);
+ List<RecordId> ids = binaries.get(key);
+ if (ids != null) {
+ for (RecordId duplicateId : ids) {
+ if (new SegmentBlob(duplicateId).equals(blob)) {
+ return new SegmentBlob(duplicateId);
+ }
+ }
+ }
+
+ // if not, clone the blob and keep track of the result
+ sb = sb.clone(writer);
+ compacted.put(id, sb.getRecordId());
+ if (ids == null) {
+ ids = newArrayList();
+ binaries.put(key, ids);
+ }
+ ids.add(sb.getRecordId());
+
+ return sb;
} catch (IOException e) {
- Log.warn("Failed to clone a binary value", e);
+ log.warn("Failed to compcat a blob", e);
+ // fall through
}
}
+
+ // no way to compact this blob, so we'll just keep it as-is
return blob;
}
+ private String getBlobKey(Blob blob) throws IOException {
+ InputStream stream = blob.getNewStream();
+ try {
+ byte[] buffer = new byte[SegmentWriter.BLOCK_SIZE];
+ int n = IOUtils.readFully(stream, buffer, 0, buffer.length);
+ return blob.length() + ":" + Hashing.sha1().hashBytes(buffer, 0, n);
+ } finally {
+ stream.close();
+ }
+ }
+
}
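For context: compaction is now driven through the new static Compactor.compact(SegmentStore) entry point instead of the old constructor-based API. A minimal caller sketch, assuming a TarMK FileStore as the SegmentStore implementation (mirroring the oak-run changes further below in this commit):

    import java.io.File;
    import java.io.IOException;

    import org.apache.jackrabbit.oak.plugins.segment.Compactor;
    import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;

    public class CompactSketch {
        public static void main(String[] args) throws IOException {
            // Open the TarMK store: 256 MB maximum tar file size, no
            // memory mapping (the settings oak-run passes in this commit).
            FileStore store = new FileStore(new File(args[0]), 256, false);
            try {
                // Rewrites the current head state into fresh segments,
                // de-duplicating shared records and binaries on the way.
                Compactor.compact(store);
            } finally {
                store.close();
            }
        }
    }

Binaries are de-duplicated via getBlobKey(Blob) above: the key combines the blob length with the SHA-1 of its first BLOCK_SIZE bytes, so a full equals() comparison is only needed when two blobs collide on that cheap key.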
Modified: jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java (original)
+++ jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java Tue Jun 3 08:34:10 2014
@@ -81,6 +81,13 @@ public final class RecordId implements C
return segmentId + ":" + offset;
}
+ /**
+ * Returns the record id string representation used in Oak 1.0.
+ */
+ public String toString10() {
+ return String.format("%s:%d", segmentId, offset);
+ }
+
@Override
public int hashCode() {
return segmentId.hashCode() ^ offset;
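The new toString10() pins the decimal "segmentId:offset" form that Oak 1.0 journal files use; the FileStore change further below switches journal writes over to it. At this revision it produces the same output as toString() (a journal line such as "f81d4fae-7dec-11d0-a765-00a0c91e6bf6:16384 root", values illustrative), presumably so the on-disk journal format stays stable even if toString() changes later.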
Modified: jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java (original)
+++ jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java Tue Jun 3 08:34:10 2014
@@ -86,7 +86,7 @@ public class Segment {
* value. And since small values are never stored as medium ones, we can
* extend the size range to cover that many longer values.
*/
- static final int MEDIUM_LIMIT = (1 << (16 - 2)) + SMALL_LIMIT;
+ public static final int MEDIUM_LIMIT = (1 << (16 - 2)) + SMALL_LIMIT;
public static int REF_COUNT_OFFSET = 5;
@@ -447,10 +447,30 @@ public class Segment {
int length = data.remaining();
writer.format("Segment %s (%d bytes)%n", id, length);
- writer.println("--------------------------------------------------------------------------");
- int refcount = getRefCount();
- for (int refid = 0; refid < refcount; refid++) {
- writer.format("reference %02x: %s%n", refid, getRefId(refid));
+ if (id.isDataSegmentId()) {
+ writer.println("--------------------------------------------------------------------------");
+ int refcount = getRefCount();
+ for (int refid = 0; refid < refcount; refid++) {
+ writer.format("reference %02x: %s%n", refid, getRefId(refid));
+ }
+ int rootcount = data.getShort(ROOT_COUNT_OFFSET) & 0xffff;
+ int pos = refcount * 16;
+ for (int rootid = 0; rootid < rootcount; rootid++) {
+ writer.format(
+ "root %d: %s at %04x%n", rootid,
+ RecordType.values()[data.get(pos + rootid * 3) & 0xff],
+ data.getShort(pos + rootid * 3 + 1) & 0xffff);
+ }
+ int blobrefcount = data.getShort(BLOBREF_COUNT_OFFSET) & 0xffff;
+ pos += rootcount * 3;
+ for (int blobrefid = 0; blobrefid < blobrefcount; blobrefid++) {
+ int offset = data.getShort(pos + blobrefid * 2) & 0xffff;
+ SegmentBlob blob = new SegmentBlob(
+ new RecordId(id, offset << RECORD_ALIGN_BITS));
+ writer.format(
+ "blobref %d: %s at %04x%n", blobrefid,
+ blob.getBlobId(), offset);
+ }
}
writer.println("--------------------------------------------------------------------------");
int pos = data.limit() - ((length + 15) & ~15);
Modified: jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java (original)
+++ jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java Tue Jun 3 08:34:10 2014
@@ -21,6 +21,8 @@ import static org.apache.jackrabbit.oak.
import static org.apache.jackrabbit.oak.plugins.segment.Segment.SMALL_LIMIT;
import static org.apache.jackrabbit.oak.plugins.segment.SegmentWriter.BLOCK_SIZE;
+import java.io.BufferedInputStream;
+import java.io.IOException;
import java.io.InputStream;
import javax.annotation.CheckForNull;
@@ -30,7 +32,7 @@ import org.apache.jackrabbit.oak.api.Blo
import org.apache.jackrabbit.oak.plugins.memory.AbstractBlob;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
-class SegmentBlob extends Record implements Blob {
+public class SegmentBlob extends Record implements Blob {
SegmentBlob(RecordId id) {
super(id);
@@ -126,6 +128,14 @@ class SegmentBlob extends Record impleme
return getRecordId().toString();
}
+ public boolean isExternal() {
+ Segment segment = getSegment();
+ int offset = getOffset();
+ byte head = segment.readByte(offset);
+ // 1110 xxxx: external value
+ return (head & 0xf0) == 0xe0;
+ }
+
public String getBlobId() {
Segment segment = getSegment();
int offset = getOffset();
@@ -138,6 +148,32 @@ class SegmentBlob extends Record impleme
}
}
+ public SegmentBlob clone(SegmentWriter writer) throws IOException {
+ Segment segment = getSegment();
+ int offset = getOffset();
+ byte head = segment.readByte(offset);
+ if ((head & 0x80) == 0x00) {
+ // 0xxx xxxx: small value
+ return writer.writeStream(new BufferedInputStream(getNewStream()));
+ } else if ((head & 0xc0) == 0x80) {
+ // 10xx xxxx: medium value
+ return writer.writeStream(new BufferedInputStream(getNewStream()));
+ } else if ((head & 0xe0) == 0xc0) {
+ // 110x xxxx: long value
+ long length = (segment.readLong(offset) & 0x1fffffffffffffffL) + MEDIUM_LIMIT;
+ int listSize = (int) ((length + BLOCK_SIZE - 1) / BLOCK_SIZE);
+ ListRecord list = new ListRecord(
+ segment.readRecordId(offset + 8), listSize);
+ return writer.writeLargeBlob(length, list.getEntries());
+ } else if ((head & 0xf0) == 0xe0) {
+ // 1110 xxxx: external value
+ return writer.writeExternalBlob(getBlobId());
+ } else {
+ throw new IllegalStateException(String.format(
+ "Unexpected value record type: %02x", head & 0xff));
+ }
+ }
+
//------------------------------------------------------------< Object >--
@Override
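The head-byte comparisons in clone() follow the segment value record encoding. A standalone sketch of the same decoding, using only the bit patterns visible in this diff (the helper is illustrative, not part of the commit):

    // Hypothetical helper mirroring the head-byte checks in SegmentBlob.clone().
    static String valueRecordType(byte head) {
        if ((head & 0x80) == 0x00) {
            return "small";     // 0xxx xxxx
        } else if ((head & 0xc0) == 0x80) {
            return "medium";    // 10xx xxxx
        } else if ((head & 0xe0) == 0xc0) {
            return "long";      // 110x xxxx
        } else if ((head & 0xf0) == 0xe0) {
            return "external";  // 1110 xxxx
        } else {
            throw new IllegalStateException(String.format(
                    "Unexpected value record type: %02x", head & 0xff));
        }
    }

Note how clone() handles each case: small and medium values are simply re-streamed through writer.writeStream(), long values reuse their existing block list via writeLargeBlob(), and external values copy only the blob id, so the external binary itself is never read.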
Modified: jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java (original)
+++ jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java Tue Jun 3 08:34:10 2014
@@ -717,6 +717,20 @@ public class SegmentWriter {
return writeStream(blob.getNewStream());
}
+ SegmentBlob writeExternalBlob(String blobId) throws IOException {
+ RecordId id = writeValueRecord(blobId);
+ return new SegmentBlob(id);
+ }
+
+ SegmentBlob writeLargeBlob(long length, List<RecordId> list) {
+ RecordId id = writeValueRecord(length, writeList(list));
+ return new SegmentBlob(id);
+ }
+
+ public synchronized void dropCache() {
+ records.clear();
+ }
+
/**
* Writes a stream value record. The given stream is consumed
* <em>and closed</em> by this method.
Modified: jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java (original)
+++ jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java Tue Jun 3 08:34:10 2014
@@ -343,7 +343,7 @@ public class FileStore implements Segmen
synchronized (this) {
log.debug("TarMK journal update {} -> {}", before, after);
- journalFile.writeBytes(after + " root\n");
+ journalFile.writeBytes(after.toString10() + " root\n");
journalFile.getChannel().force(false);
persistedHead.set(after);
@@ -591,4 +591,5 @@ public class FileStore implements Segmen
System.gc();
cleanupNeeded.set(true);
}
-}
\ No newline at end of file
+
+}
Modified: jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/state/ApplyDiff.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/state/ApplyDiff.java?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/state/ApplyDiff.java (original)
+++ jackrabbit/oak/branches/1.0/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/state/ApplyDiff.java Tue Jun 3 08:34:10 2014
@@ -32,7 +32,7 @@ import org.apache.jackrabbit.oak.api.Pro
* NodeState base = ...;
* NodeState target = ...;
* NodeBuilder builder = base.builder();
- * target.compareAgainstBaseState(base, new ReapplyDiff(builder));
+ * target.compareAgainstBaseState(base, new ApplyDiff(builder));
* assertEquals(target, builder.getNodeState());
* </pre>
* <p>
@@ -47,7 +47,7 @@ import org.apache.jackrabbit.oak.api.Pro
*/
public class ApplyDiff implements NodeStateDiff {
- private final NodeBuilder builder;
+ protected final NodeBuilder builder;
public ApplyDiff(NodeBuilder builder) {
this.builder = builder;
Modified: jackrabbit/oak/branches/1.0/oak-run/README.md
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-run/README.md?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-run/README.md (original)
+++ jackrabbit/oak/branches/1.0/oak-run/README.md Tue Jun 3 08:34:10 2014
@@ -30,6 +30,14 @@ store. Currently this is only supported
$ java -jar oak-run-*.jar debug /path/to/oak/repository [id...]
+Compact
+-------
+
+The 'compact' mode runs the segment compaction operation on the provided TarMK
+repository. To start this mode, use:
+
+ $ java -jar oak-run-*.jar compact /path/to/oak/repository
+
Upgrade
-------
Modified: jackrabbit/oak/branches/1.0/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/branches/1.0/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java?rev=1599447&r1=1599446&r2=1599447&view=diff
==============================================================================
--- jackrabbit/oak/branches/1.0/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java (original)
+++ jackrabbit/oak/branches/1.0/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Main.java Tue Jun 3 08:34:10 2014
@@ -19,6 +19,7 @@ package org.apache.jackrabbit.oak.run;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -53,6 +54,7 @@ import org.apache.jackrabbit.oak.jcr.Jcr
import org.apache.jackrabbit.oak.plugins.backup.FileStoreBackup;
import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
+import org.apache.jackrabbit.oak.plugins.segment.Compactor;
import org.apache.jackrabbit.oak.plugins.segment.RecordId;
import org.apache.jackrabbit.oak.plugins.segment.Segment;
import org.apache.jackrabbit.oak.plugins.segment.SegmentId;
@@ -102,6 +104,9 @@ public class Main {
case DEBUG:
debug(args);
break;
+ case COMPACT:
+ compact(args);
+ break;
case SERVER:
server(URI, args);
break;
@@ -155,6 +160,36 @@ public class Main {
}
}
+ private static void compact(String[] args) throws IOException {
+ if (args.length != 1) {
+ System.err.println("usage: compact <path>");
+ System.exit(1);
+ } else {
+ File directory = new File(args[0]);
+ System.out.println("Compacting " + directory);
+ System.out.println(" before " + Arrays.toString(directory.list()));
+
+ System.out.println(" -> compacting");
+ FileStore store = new FileStore(directory, 256, false);
+ try {
+ Compactor.compact(store);
+ } finally {
+ store.close();
+ }
+
+ System.out.println(" -> cleaning up");
+ store = new FileStore(directory, 256, false);
+ try {
+ store.gc();
+ store.flush();
+ } finally {
+ store.close();
+ }
+
+ System.out.println(" after " + Arrays.toString(directory.list()));
+ }
+ }
+
private static void debug(String[] args) throws IOException {
if (args.length == 0) {
System.err.println("usage: debug <path> [id...]");
@@ -481,6 +516,7 @@ public class Main {
BACKUP("backup"),
BENCHMARK("benchmark"),
DEBUG("debug"),
+ COMPACT("compact"),
SERVER("server"),
UPGRADE("upgrade");