Posted to commits@geode.apache.org by je...@apache.org on 2018/08/01 14:04:47 UTC

[geode] branch develop updated: GEODE-5497: Generated restore.sh script fails when incremental backups are restored (#2231)

This is an automated email from the ASF dual-hosted git repository.

jensdeppe pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/develop by this push:
     new 79f8145  GEODE-5497: Generated restore.sh script fails when incremental backups are restored (#2231)
79f8145 is described below

commit 79f8145483fe6201e5f0eb7046017c8de9ac1f3b
Author: Jens Deppe <jd...@pivotal.io>
AuthorDate: Wed Aug 1 07:04:42 2018 -0700

    GEODE-5497: Generated restore.sh script fails when incremental backups are restored (#2231)
---
 .../cache/backup/BackupIntegrationTest.java        | 207 ++++++++++++++-------
 .../geode/internal/cache/backup/BackupWriter.java  |   2 +
 .../cache/backup/FileSystemBackupWriter.java       |   5 +
 .../internal/cache/backup/UnixScriptGenerator.java |   2 +
 4 files changed, 146 insertions(+), 70 deletions(-)
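
Root cause, as the hunks below show: the generated restore.sh copied each
backed-up file into its original location with a bare "cp -p", assuming the
original disk directories still existed. On an incremental restore they may
not (and BackupIntegrationTest's destroyDiskDirs() now deletes them without
recreating them), so the copies failed. The fix has UnixScriptGenerator emit
a "mkdir -p" for each target file's parent directory ahead of the copy; a
sample of the generated script appears at the end of this message.
BackupWriter also gains a getBackupDirectory() accessor so an incremental
backup can be pointed at the baseline produced by a previous writer.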

diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
index 26f6b68..862856e 100644
--- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
+++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/backup/BackupIntegrationTest.java
@@ -19,6 +19,7 @@ import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
 import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
 import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -68,7 +69,7 @@ public class BackupIntegrationTest {
   public TemporaryFolder temporaryFolder = new TemporaryFolder();
 
   private GemFireCacheImpl cache = null;
-  private File tmpDir;
+  private File incrementalDir;
   private File cacheXmlFile;
 
   private Properties props = new Properties();
@@ -83,28 +84,26 @@ public class BackupIntegrationTest {
 
   @Before
   public void setUp() throws Exception {
-    if (tmpDir == null) {
-      props.setProperty(MCAST_PORT, "0");
-      props.setProperty(LOCATORS, "");
-      tmpDir = temporaryFolder.newFolder("temp");
-      try {
-        URL url = BackupIntegrationTest.class.getResource("BackupIntegrationTest.cache.xml");
-        cacheXmlFile = new File(url.toURI().getPath());
-      } catch (URISyntaxException e) {
-        throw new ExceptionInInitializerError(e);
-      }
-      props.setProperty(CACHE_XML_FILE, cacheXmlFile.getAbsolutePath());
-      props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
+    props.setProperty(MCAST_PORT, "0");
+    props.setProperty(LOCATORS, "");
+    incrementalDir = temporaryFolder.newFolder("incremental");
+    try {
+      URL url = BackupIntegrationTest.class.getResource("BackupIntegrationTest.cache.xml");
+      cacheXmlFile = new File(url.toURI().getPath());
+    } catch (URISyntaxException e) {
+      throw new ExceptionInInitializerError(e);
     }
+    props.setProperty(CACHE_XML_FILE, cacheXmlFile.getAbsolutePath());
+    props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
 
     createCache();
 
-    backupDir = new File(tmpDir, getName() + "backup_Dir");
+    backupDir = temporaryFolder.newFolder("backup_Dir");
     backupDir.mkdir();
     diskDirs = new File[2];
-    diskDirs[0] = new File(tmpDir, getName() + "_diskDir1");
+    diskDirs[0] = temporaryFolder.newFolder("disk_Dir1");
     diskDirs[0].mkdir();
-    diskDirs[1] = new File(tmpDir, getName() + "_diskDir2");
+    diskDirs[1] = temporaryFolder.newFolder("disk_Dir2");
     diskDirs[1].mkdir();
   }
 
@@ -122,61 +121,80 @@ public class BackupIntegrationTest {
 
   private void destroyDiskDirs() throws IOException {
     FileUtils.deleteDirectory(diskDirs[0]);
-    diskDirs[0].mkdir();
     FileUtils.deleteDirectory(diskDirs[1]);
-    diskDirs[1].mkdir();
   }
 
   @Test
   public void testBackupAndRecover() throws Exception {
-    backupAndRecover(() -> {
+    RegionCreator regionCreator = () -> {
       createDiskStore();
       return createRegion();
-    });
-  }
+    };
 
-  @Test
-  public void testBackupAndRecoverOldConfig() throws Exception {
-    backupAndRecover(() -> {
-      createDiskStore();
-      RegionFactory regionFactory = cache.createRegionFactory();
-      regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
-      regionFactory.setDiskStoreName(DISK_STORE_NAME);
-      return regionFactory.create("region");
-    });
-  }
+    Region<Object, Object> region = regionCreator.createRegion();
 
-  private void backupAndRecover(RegionCreator regionFactory)
-      throws IOException, InterruptedException {
-    Region<Object, Object> region = regionFactory.createRegion();
+    putDataAndRollOplogs(0, region);
 
-    // Put enough data to roll some oplogs
-    for (int i = 0; i < 1024; i++) {
-      region.put(i, getBytes(i));
+    for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
+      store.flush();
     }
 
-    for (int i = 0; i < 512; i++) {
-      region.destroy(i);
-    }
+    cache.close();
+    createCache();
+    region = regionCreator.createRegion();
+    validateEntriesExist(region, 512, 2048);
+    validateEntriesDoNotExist(region, 0, 512);
 
-    for (int i = 1024; i < 2048; i++) {
-      region.put(i, getBytes(i));
-    }
+    BackupService backup = cache.getBackupService();
+    BackupWriter writer = getBackupWriter();
+    backup.prepareBackup(cache.getMyId(), writer);
+    backup.doBackup();
 
-    // This section of the test is for bug 43951
-    findDiskStore().forceRoll();
-    // add a put to the current crf
-    region.put("junk", "value");
-    // do a destroy of a key in a previous oplog
-    region.destroy(2047);
-    // do a destroy of the key in the current crf
-    region.destroy("junk");
-    // the current crf is now all garbage but
-    // we need to keep the drf around since the older
-    // oplog has a create that it deletes.
-    findDiskStore().forceRoll();
-    // restore the deleted entry.
-    region.put(2047, getBytes(2047));
+    // Put another key to make sure we restore
+    // from a backup that doesn't contain this key
+    region.put("A", "A");
+
+    cache.close();
+
+    // Make sure the restore script refuses to run before we destroy the files.
+    restoreBackup(backupDir, true);
+
+    // Make sure the disk store is unaffected by the failed restore
+    createCache();
+    region = regionCreator.createRegion();
+    validateEntriesExist(region, 512, 2048);
+    validateEntriesDoNotExist(region, 0, 512);
+    assertEquals("A", region.get("A"));
+
+    region.put("B", "B");
+
+    cache.close();
+    // destroy the disk directories
+    destroyDiskDirs();
+
+    // Now the restore script should work
+    restoreBackup(backupDir, false);
+
+    // Make sure the cache has the restored backup
+    createCache();
+    region = regionCreator.createRegion();
+    validateEntriesExist(region, 512, 2048);
+    validateEntriesDoNotExist(region, 0, 512);
+
+    assertNull(region.get("A"));
+    assertNull(region.get("B"));
+  }
+
+  @Test
+  public void testIncrementalBackupAndRecover() throws Exception {
+    RegionCreator regionCreator = () -> {
+      createDiskStore();
+      return createRegion();
+    };
+
+    Region<Object, Object> region = regionCreator.createRegion();
+
+    putDataAndRollOplogs(0, region);
 
     for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
       store.flush();
@@ -184,15 +202,27 @@ public class BackupIntegrationTest {
 
     cache.close();
     createCache();
-    region = regionFactory.createRegion();
+    region = regionCreator.createRegion();
     validateEntriesExist(region, 512, 2048);
     for (int i = 0; i < 512; i++) {
       assertNull(region.get(i));
     }
 
     BackupService backup = cache.getBackupService();
-    BackupWriter writer = getBackupWriter();
-    backup.prepareBackup(cache.getMyId(), writer);
+    BackupWriter baseWriter = getBackupWriter();
+    backup.prepareBackup(cache.getMyId(), baseWriter);
+    backup.doBackup();
+
+    // Add more data for incremental to have something to backup
+    putDataAndRollOplogs(1, region);
+
+    Properties backupProperties =
+        new BackupConfigFactory()
+            .withBaselineDirPath(baseWriter.getBackupDirectory().getParent().toString())
+            .withTargetDirPath(incrementalDir.toString()).createBackupProperties();
+    BackupWriter incrementalWriter = BackupWriterFactory.FILE_SYSTEM.createWriter(backupProperties,
+        cache.getMyId().toString());
+    backup.prepareBackup(cache.getMyId(), incrementalWriter);
     backup.doBackup();
 
     // Put another key to make sure we restore
@@ -202,11 +232,11 @@ public class BackupIntegrationTest {
     cache.close();
 
     // Make sure the restore script refuses to run before we destroy the files.
-    restoreBackup(true);
+    restoreBackup(incrementalDir, true);
 
     // Make sure the disk store is unaffected by the failed restore
     createCache();
-    region = regionFactory.createRegion();
+    region = regionCreator.createRegion();
     validateEntriesExist(region, 512, 2048);
     for (int i = 0; i < 512; i++) {
       assertNull(region.get(i));
@@ -220,20 +250,52 @@ public class BackupIntegrationTest {
     destroyDiskDirs();
 
     // Now the restore script should work
-    restoreBackup(false);
+    restoreBackup(incrementalDir, false);
 
     // Make sure the cache has the restored backup
     createCache();
-    region = regionFactory.createRegion();
+    region = regionCreator.createRegion();
     validateEntriesExist(region, 512, 2048);
-    for (int i = 0; i < 512; i++) {
-      assertNull(region.get(i));
-    }
+    validateEntriesExist(region, 2560, 4096);
+    validateEntriesDoNotExist(region, 0, 512);
+    validateEntriesDoNotExist(region, 2048, 2560);
 
     assertNull(region.get("A"));
     assertNull(region.get("B"));
   }
 
+  private void putDataAndRollOplogs(int step, Region<Object, Object> region) {
+    int startOne = step * 2048;
+    int startTwo = startOne + 1024;
+
+    // Put enough data to roll some oplogs
+    for (int i = startOne; i < startOne + 1024; i++) {
+      region.put(i, getBytes(i));
+    }
+
+    for (int i = startOne; i < startOne + 512; i++) {
+      region.destroy(i);
+    }
+
+    for (int i = startTwo; i < startTwo + 1024; i++) {
+      region.put(i, getBytes(i));
+    }
+
+    // This section of the test is for bug 43951
+    findDiskStore().forceRoll();
+    // add a put to the current crf
+    region.put("junk", "value");
+    // do a destroy of a key in a previous oplog
+    region.destroy(startTwo + 1023);
+    // do a destroy of the key in the current crf
+    region.destroy("junk");
+    // the current crf is now all garbage but
+    // we need to keep the drf around since the older
+    // oplog has a create that it deletes.
+    findDiskStore().forceRoll();
+    // restore the deleted entry.
+    region.put(startTwo + 1023, getBytes(startTwo + 1023));
+  }
 
   @Test
   public void testBackupEmptyDiskStore() throws Exception {
@@ -271,7 +333,6 @@ public class BackupIntegrationTest {
         Arrays.asList(backupDir.list()));
   }
 
-
   @Test
   public void testCompactionDuringBackup() throws Exception {
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
@@ -298,7 +359,7 @@ public class BackupIntegrationTest {
 
     cache.close();
     destroyDiskDirs();
-    restoreBackup(false);
+    restoreBackup(backupDir, false);
     createCache();
     createDiskStore();
     region = createRegion();
@@ -358,7 +419,12 @@ public class BackupIntegrationTest {
       for (int j = 0; j < expected.length; j++) {
         assertEquals("Byte wrong on entry " + i + ", byte " + j, expected[j], bytes[j]);
       }
+    }
+  }
 
+  private void validateEntriesDoNotExist(Region region, int start, int end) {
+    for (int i = start; i < end; i++) {
+      assertFalse("Entry " + i + " exists", region.containsKey(i));
     }
   }
 
@@ -369,7 +435,8 @@ public class BackupIntegrationTest {
     return data;
   }
 
-  private void restoreBackup(boolean expectFailure) throws IOException, InterruptedException {
+  private void restoreBackup(File backupDir, boolean expectFailure)
+      throws IOException, InterruptedException {
     Collection<File> restoreScripts = FileUtils.listFiles(backupDir,
         new RegexFileFilter(".*restore.*"), DirectoryFileFilter.DIRECTORY);
     assertNotNull(restoreScripts);
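
For reference, the incremental flow the new testIncrementalBackupAndRecover
drives boils down to the sketch below (condensed from the test above; cache,
region, getBackupWriter(), incrementalDir, and putDataAndRollOplogs() are as
defined in the test):

    // Full (baseline) backup.
    BackupService backup = cache.getBackupService();
    BackupWriter baseWriter = getBackupWriter();
    backup.prepareBackup(cache.getMyId(), baseWriter);
    backup.doBackup();

    // Mutate the region so the incremental backup has something to capture.
    putDataAndRollOplogs(1, region);

    // Incremental backup: the baseline path comes from the new
    // BackupWriter.getBackupDirectory() accessor added by this commit.
    Properties backupProperties = new BackupConfigFactory()
        .withBaselineDirPath(baseWriter.getBackupDirectory().getParent().toString())
        .withTargetDirPath(incrementalDir.toString())
        .createBackupProperties();
    BackupWriter incrementalWriter = BackupWriterFactory.FILE_SYSTEM
        .createWriter(backupProperties, cache.getMyId().toString());
    backup.prepareBackup(cache.getMyId(), incrementalWriter);
    backup.doBackup();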
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupWriter.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupWriter.java
index c124207..8cf94f9 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupWriter.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/BackupWriter.java
@@ -30,4 +30,6 @@ public interface BackupWriter {
   void backupFiles(BackupDefinition backupDefinition) throws IOException;
 
   Path getBaselineDirectory();
+
+  Path getBackupDirectory();
 }
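
getBackupDirectory() exposes the directory a writer places its backup in;
the test passes its parent to BackupConfigFactory.withBaselineDirPath() when
creating the incremental writer, as in the sketch above.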
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriter.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriter.java
index ca54034..c8111d0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriter.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/FileSystemBackupWriter.java
@@ -72,6 +72,11 @@ class FileSystemBackupWriter implements BackupWriter {
     return incrementalBaselineLocation.getMemberBackupLocationDir().getParent();
   }
 
+  @Override
+  public Path getBackupDirectory() {
+    return backupDirectory;
+  }
+
   private void backupAllFilesets(BackupDefinition backupDefinition) throws IOException {
     RestoreScript restoreScript = backupDefinition.getRestoreScript();
     backupDiskInitFiles(backupDefinition.getDiskInitFiles());
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
index b45592f..1137ee5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/backup/UnixScriptGenerator.java
@@ -50,6 +50,8 @@ class UnixScriptGenerator implements ScriptGenerator {
   @Override
   public void writeCopyFile(final BufferedWriter writer, final File backup, final File original)
       throws IOException {
+    writer.write("mkdir -p '" + original.getParent() + "'");
+    writer.newLine();
     writer.write("cp -p '" + backup + "' '" + original + "'");
     writer.newLine();
   }
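
To make the effect concrete: after this change, each copy step that
writeCopyFile() emits into the generated restore.sh looks like the following
(paths are made up for illustration):

    mkdir -p '/var/geode/diskDir1'
    cp -p '/var/geode/backup/BACKUPds1_1.crf' '/var/geode/diskDir1/BACKUPds1_1.crf'

Previously only the cp line was written, which fails once the original disk
directories have been removed.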