You are viewing a plain text version of this content. The canonical (HTML) version, with working links, is available in the original mailing-list archive.
Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/07/26 20:23:53 UTC
svn commit: r1151192 - in /hadoop/common/branches/HDFS-1073/hdfs/src:
java/org/apache/hadoop/hdfs/server/namenode/
test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Author: todd
Date: Tue Jul 26 18:23:51 2011
New Revision: 1151192
URL: http://svn.apache.org/viewvc?rev=1151192&view=rev
Log:
Rename StorageArchiver to StoragePurger as suggested by Matt and Ivan in the comments on HDFS-1073
Modified:
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java
hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java
hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java Tue Jul 26 18:23:51 2011
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.IOException;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
/**
@@ -54,7 +54,7 @@ class BackupJournalManager implements Jo
}
@Override
- public void archiveLogsOlderThan(long minTxIdToKeep, StorageArchiver archiver)
+ public void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
throws IOException {
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Tue Jul 26 18:23:51 2011
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -870,24 +870,24 @@ public class FSEditLog {
/**
* Archive any log files that are older than the given txid.
*/
- public void archiveLogsOlderThan(
- final long minTxIdToKeep, final StorageArchiver archiver) {
+ public void purgeLogsOlderThan(
+ final long minTxIdToKeep, final StoragePurger purger) {
synchronized (this) {
// synchronized to prevent findbugs warning about inconsistent
// synchronization. This will be JIT-ed out if asserts are
// off.
assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op
minTxIdToKeep <= curSegmentTxId :
- "cannot archive logs older than txid " + minTxIdToKeep +
+ "cannot purge logs older than txid " + minTxIdToKeep +
" when current segment starts at " + curSegmentTxId;
}
mapJournalsAndReportErrors(new JournalClosure() {
@Override
public void apply(JournalAndStream jas) throws IOException {
- jas.manager.archiveLogsOlderThan(minTxIdToKeep, archiver);
+ jas.manager.purgeLogsOlderThan(minTxIdToKeep, purger);
}
- }, "archiving logs older than " + minTxIdToKeep);
+ }, "purging logs older than " + minTxIdToKeep);
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue Jul 26 18:23:51 2011
@@ -853,18 +853,18 @@ public class FSImage implements Closeabl
// Since we now have a new checkpoint, we can clean up some
// old edit logs and checkpoints.
- archiveOldStorage();
+ purgeOldStorage();
}
/**
- * Archive any files in the storage directories that are no longer
+ * Purge any files in the storage directories that are no longer
* necessary.
*/
- public void archiveOldStorage() {
+ public void purgeOldStorage() {
try {
- archivalManager.archiveOldStorage();
+ archivalManager.purgeOldStorage();
} catch (Exception e) {
- LOG.warn("Unable to archive old storage", e);
+ LOG.warn("Unable to purge old storage", e);
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Tue Jul 26 18:23:51 2011
@@ -27,7 +27,7 @@ import java.util.List;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -91,7 +91,7 @@ class FileJournalManager implements Jour
}
@Override
- public void archiveLogsOlderThan(long minTxIdToKeep, StorageArchiver archiver)
+ public void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
throws IOException {
File[] files = FileUtil.listFiles(sd.getCurrentDir());
List<FoundEditLog> editLogs =
@@ -99,7 +99,7 @@ class FileJournalManager implements Jour
for (FoundEditLog log : editLogs) {
if (log.getStartTxId() < minTxIdToKeep &&
log.getLastTxId() < minTxIdToKeep) {
- archiver.archiveLog(log);
+ purger.purgeLog(log);
}
}
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Tue Jul 26 18:23:51 2011
@@ -150,7 +150,7 @@ public class GetImageServlet extends Htt
// Now that we have a new checkpoint, we might be able to
// remove some old ones.
- nnImage.archiveOldStorage();
+ nnImage.purgeOldStorage();
} finally {
currentlyDownloadingCheckpoints.remove(txid);
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java Tue Jul 26 18:23:51 2011
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.IOException;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
/**
* A JournalManager is responsible for managing a single place of storing
@@ -52,10 +52,10 @@ interface JournalManager {
*
* @param minTxIdToKeep the earliest txid that must be retained after purging
* old logs
- * @param archiver the archival implementation to use
+ * @param purger the purging implementation to use
* @throws IOException if purging fails
*/
- void archiveLogsOlderThan(long minTxIdToKeep, StorageArchiver archiver)
+ void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
throws IOException;
/**
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java Tue Jul 26 18:23:51 2011
@@ -39,7 +39,7 @@ import com.google.common.collect.Sets;
* directories of the NN and enforcing a retention policy on checkpoints
* and edit logs.
*
- * It delegates the actual removal of files to a StorageArchiver
+ * It delegates the actual removal of files to a StoragePurger
* implementation, which might delete the files or instead copy them to
* a filer or HDFS for later analysis.
*/
@@ -48,47 +48,47 @@ public class NNStorageArchivalManager {
private final int numCheckpointsToRetain;
private static final Log LOG = LogFactory.getLog(NNStorageArchivalManager.class);
private final NNStorage storage;
- private final StorageArchiver archiver;
+ private final StoragePurger purger;
private final FSEditLog editLog;
public NNStorageArchivalManager(
Configuration conf,
NNStorage storage,
FSEditLog editLog,
- StorageArchiver archiver) {
+ StoragePurger purger) {
this.numCheckpointsToRetain = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,
DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT);
this.storage = storage;
this.editLog = editLog;
- this.archiver = archiver;
+ this.purger = purger;
}
public NNStorageArchivalManager(Configuration conf, NNStorage storage,
FSEditLog editLog) {
- this(conf, storage, editLog, new DeletionStorageArchiver());
+ this(conf, storage, editLog, new DeletionStoragePurger());
}
- public void archiveOldStorage() throws IOException {
+ public void purgeOldStorage() throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector();
storage.inspectStorageDirs(inspector);
long minImageTxId = getImageTxIdToRetain(inspector);
- archiveCheckpointsOlderThan(inspector, minImageTxId);
+ purgeCheckpointsOlderThan(inspector, minImageTxId);
// If fsimage_N is the image we want to keep, then we need to keep
// all txns > N. We can remove anything < N+1, since fsimage_N
// reflects the state up to and including N.
- editLog.archiveLogsOlderThan(minImageTxId + 1, archiver);
+ editLog.purgeLogsOlderThan(minImageTxId + 1, purger);
}
- private void archiveCheckpointsOlderThan(
+ private void purgeCheckpointsOlderThan(
FSImageTransactionalStorageInspector inspector,
long minTxId) {
for (FoundFSImage image : inspector.getFoundImages()) {
if (image.getTxId() < minTxId) {
LOG.info("Purging old image " + image);
- archiver.archiveImage(image);
+ purger.purgeImage(image);
}
}
}
@@ -120,21 +120,21 @@ public class NNStorageArchivalManager {
}
/**
- * Interface responsible for archiving old checkpoints and edit logs.
+ * Interface responsible for disposing of old checkpoints and edit logs.
*/
- static interface StorageArchiver {
- void archiveLog(FoundEditLog log);
- void archiveImage(FoundFSImage image);
+ static interface StoragePurger {
+ void purgeLog(FoundEditLog log);
+ void purgeImage(FoundFSImage image);
}
- static class DeletionStorageArchiver implements StorageArchiver {
+ static class DeletionStoragePurger implements StoragePurger {
@Override
- public void archiveLog(FoundEditLog log) {
+ public void purgeLog(FoundEditLog log) {
deleteOrWarn(log.getFile());
}
@Override
- public void archiveImage(FoundFSImage image) {
+ public void purgeImage(FoundFSImage image) {
deleteOrWarn(image.getFile());
deleteOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Jul 26 18:23:51 2011
@@ -544,7 +544,7 @@ public class SecondaryNameNode implement
// Since we've successfully checkpointed, we can remove some old
// image files
- checkpointImage.archiveOldStorage();
+ checkpointImage.purgeOldStorage();
return loadImage;
}
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java Tue Jul 26 18:23:51 2011
@@ -91,7 +91,7 @@ public class TestNNStorageArchivalFuncti
getInProgressEditsFileName(3));
doSaveNamespace(nn);
- LOG.info("After second save, image 0 should be archived, " +
+ LOG.info("After second save, image 0 should be purged, " +
"and image 4 should exist in both.");
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
@@ -110,21 +110,21 @@ public class TestNNStorageArchivalFuncti
LOG.info("Restoring accessibility of first storage dir");
sd0.setExecutable(true);
- LOG.info("nothing should have been archived in first storage dir");
+ LOG.info("nothing should have been purged in first storage dir");
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
assertGlobEquals(cd0, "edits_.*",
getFinalizedEditsFileName(3, 4),
getInProgressEditsFileName(5));
- LOG.info("fsimage_2 should be archived in second storage dir");
+ LOG.info("fsimage_2 should be purged in second storage dir");
assertGlobEquals(cd1, "fsimage_\\d*",
getImageFileName(4), getImageFileName(6));
assertGlobEquals(cd1, "edits_.*",
getFinalizedEditsFileName(5, 6),
getInProgressEditsFileName(7));
- LOG.info("On next save, we should archive logs from the failed dir," +
+ LOG.info("On next save, we should purge logs from the failed dir," +
" but not images, since the image directory is in failed state.");
doSaveNamespace(nn);
assertGlobEquals(cd1, "fsimage_\\d*",
Modified: hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java?rev=1151192&r1=1151191&r2=1151192&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java (original)
+++ hadoop/common/branches/HDFS-1073/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java Tue Jul 26 18:23:51 2011
@@ -31,7 +31,7 @@ import static org.apache.hadoop.hdfs.ser
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@@ -48,11 +48,11 @@ import com.google.common.collect.Sets;
public class TestNNStorageArchivalManager {
/**
* Test the "easy case" where we have more images in the
- * directory than we need to keep. Should archive the
+ * directory than we need to keep. Should purge the
* old ones.
*/
@Test
- public void testArchiveEasyCase() throws IOException {
+ public void testPurgeEasyCase() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
@@ -64,7 +64,7 @@ public class TestNNStorageArchivalManage
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(301,400), false);
tc.addLog("/foo1/current/" + getInProgressEditsFileName(401), false);
- // Test that other files don't get archived
+ // Test that other files don't get purged
tc.addLog("/foo1/current/VERSION", false);
runTest(tc);
}
@@ -73,7 +73,7 @@ public class TestNNStorageArchivalManage
* Same as above, but across multiple directories
*/
@Test
- public void testArchiveMultipleDirs() throws IOException {
+ public void testPurgeMultipleDirs() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addRoot("/foo2", NameNodeDirType.IMAGE_AND_EDITS);
@@ -93,10 +93,10 @@ public class TestNNStorageArchivalManage
/**
* Test that if we have fewer fsimages than the configured
- * retention, we don't archive any of them
+ * retention, we don't purge any of them
*/
@Test
- public void testArchiveLessThanRetention() throws IOException {
+ public void testPurgeLessThanRetention() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), false);
@@ -132,7 +132,7 @@ public class TestNNStorageArchivalManage
}
/**
- * Test that old in-progress logs are properly archived
+ * Test that old in-progress logs are properly purged
*/
@Test
public void testOldInProgress() throws IOException {
@@ -166,45 +166,45 @@ public class TestNNStorageArchivalManage
private void runTest(TestCaseDescription tc) throws IOException {
Configuration conf = new Configuration();
- StorageArchiver mockArchiver =
- Mockito.mock(NNStorageArchivalManager.StorageArchiver.class);
- ArgumentCaptor<FoundFSImage> imagesArchivedCaptor =
+ StoragePurger mockPurger =
+ Mockito.mock(NNStorageArchivalManager.StoragePurger.class);
+ ArgumentCaptor<FoundFSImage> imagesPurgedCaptor =
ArgumentCaptor.forClass(FoundFSImage.class);
- ArgumentCaptor<FoundEditLog> logsArchivedCaptor =
+ ArgumentCaptor<FoundEditLog> logsPurgedCaptor =
ArgumentCaptor.forClass(FoundEditLog.class);
- // Ask the manager to archive files we don't need any more
+ // Ask the manager to purge files we don't need any more
new NNStorageArchivalManager(conf,
- tc.mockStorage(), tc.mockEditLog(), mockArchiver)
- .archiveOldStorage();
+ tc.mockStorage(), tc.mockEditLog(), mockPurger)
+ .purgeOldStorage();
- // Verify that it asked the archiver to remove the correct files
- Mockito.verify(mockArchiver, Mockito.atLeast(0))
- .archiveImage(imagesArchivedCaptor.capture());
- Mockito.verify(mockArchiver, Mockito.atLeast(0))
- .archiveLog(logsArchivedCaptor.capture());
+ // Verify that it asked the purger to remove the correct files
+ Mockito.verify(mockPurger, Mockito.atLeast(0))
+ .purgeImage(imagesPurgedCaptor.capture());
+ Mockito.verify(mockPurger, Mockito.atLeast(0))
+ .purgeLog(logsPurgedCaptor.capture());
// Check images
- Set<String> archivedPaths = Sets.newHashSet();
- for (FoundFSImage archived : imagesArchivedCaptor.getAllValues()) {
- archivedPaths.add(archived.getFile().toString());
+ Set<String> purgedPaths = Sets.newHashSet();
+ for (FoundFSImage purged : imagesPurgedCaptor.getAllValues()) {
+ purgedPaths.add(purged.getFile().toString());
}
- Assert.assertEquals(Joiner.on(",").join(tc.expectedArchivedImages),
- Joiner.on(",").join(archivedPaths));
+ Assert.assertEquals(Joiner.on(",").join(tc.expectedPurgedImages),
+ Joiner.on(",").join(purgedPaths));
// Check images
- archivedPaths.clear();
- for (FoundEditLog archived : logsArchivedCaptor.getAllValues()) {
- archivedPaths.add(archived.getFile().toString());
+ purgedPaths.clear();
+ for (FoundEditLog purged : logsPurgedCaptor.getAllValues()) {
+ purgedPaths.add(purged.getFile().toString());
}
- Assert.assertEquals(Joiner.on(",").join(tc.expectedArchivedLogs),
- Joiner.on(",").join(archivedPaths));
+ Assert.assertEquals(Joiner.on(",").join(tc.expectedPurgedLogs),
+ Joiner.on(",").join(purgedPaths));
}
private static class TestCaseDescription {
private Map<String, FakeRoot> dirRoots = Maps.newHashMap();
- private Set<String> expectedArchivedLogs = Sets.newHashSet();
- private Set<String> expectedArchivedImages = Sets.newHashSet();
+ private Set<String> expectedPurgedLogs = Sets.newHashSet();
+ private Set<String> expectedPurgedImages = Sets.newHashSet();
private static class FakeRoot {
NameNodeDirType type;
@@ -234,17 +234,17 @@ public class TestNNStorageArchivalManage
}
}
- void addLog(String path, boolean expectArchive) {
+ void addLog(String path, boolean expectPurge) {
addFile(path);
- if (expectArchive) {
- expectedArchivedLogs.add(path);
+ if (expectPurge) {
+ expectedPurgedLogs.add(path);
}
}
- void addImage(String path, boolean expectArchive) {
+ void addImage(String path, boolean expectPurge) {
addFile(path);
- if (expectArchive) {
- expectedArchivedImages.add(path);
+ if (expectPurge) {
+ expectedPurgedImages.add(path);
}
}
@@ -274,15 +274,15 @@ public class TestNNStorageArchivalManage
Object[] args = invocation.getArguments();
assert args.length == 2;
long txId = (Long) args[0];
- StorageArchiver archiver = (StorageArchiver) args[1];
+ StoragePurger purger = (StoragePurger) args[1];
for (JournalManager jm : jms) {
- jm.archiveLogsOlderThan(txId, archiver);
+ jm.purgeLogsOlderThan(txId, purger);
}
return null;
}
- }).when(mockLog).archiveLogsOlderThan(
- Mockito.anyLong(), (StorageArchiver) Mockito.anyObject());
+ }).when(mockLog).purgeLogsOlderThan(
+ Mockito.anyLong(), (StoragePurger) Mockito.anyObject());
return mockLog;
}
}