Posted to commits@bookkeeper.apache.org by si...@apache.org on 2017/06/21 19:07:46 UTC

bookkeeper git commit: BOOKKEEPER-1097: GC test when no WritableDirs

Repository: bookkeeper
Updated Branches:
  refs/heads/master 256e75873 -> 596966007


BOOKKEEPER-1097: GC test when no WritableDirs

- Functional test validating that compaction takes place even if there
are no writable ledger dirs, as long as there are ledger dirs available
through LedgerDirsManager.getWritableLedgerDirsForNewLog

- End-to-end test case of Bookie recovery: when a Bookie ledger dir reaches
the disk usage threshold, the Bookie recovers by forcing GC/compaction
(a configuration sketch follows below)
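
For context, a minimal sketch (not part of this commit) of the server
configuration knobs the new tests rely on; the setter names and values are
taken directly from the test code in the diff below:

    ServerConfiguration conf = new ServerConfiguration();
    // allow GC/compaction to be forced even when no ledger dir is writable
    conf.setIsForceGCAllowWhenNoSpace(true);
    // check disk usage frequently so full/writable transitions are detected quickly
    conf.setDiskCheckInterval(3000);
    // keep the regular GC/compaction schedule far out so only the forced path is exercised
    conf.setGcWaitTime(60000);
    conf.setMinorCompactionInterval(600000);
    conf.setMajorCompactionInterval(700000);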

Author: Charan Reddy Guttapalem <cg...@salesforce.com>

Reviewers: Enrico Olivelli <eo...@gmail.com>, Jia Zhai <None>, Sijie Guo <si...@apache.org>

This closes #188 from reddycharan/gctestnowritabledirs


Project: http://git-wip-us.apache.org/repos/asf/bookkeeper/repo
Commit: http://git-wip-us.apache.org/repos/asf/bookkeeper/commit/59696600
Tree: http://git-wip-us.apache.org/repos/asf/bookkeeper/tree/59696600
Diff: http://git-wip-us.apache.org/repos/asf/bookkeeper/diff/59696600

Branch: refs/heads/master
Commit: 59696600784c54192877a5742c6b141eadb07e26
Parents: 256e758
Author: Charan Reddy Guttapalem <cg...@salesforce.com>
Authored: Wed Jun 21 12:07:39 2017 -0700
Committer: Sijie Guo <si...@apache.org>
Committed: Wed Jun 21 12:07:39 2017 -0700

----------------------------------------------------------------------
 .../org/apache/bookkeeper/bookie/Bookie.java    |   4 +-
 .../bookie/BookieStorageThresholdTest.java      | 255 +++++++++++++++++++
 .../bookkeeper/bookie/CompactionTest.java       | 110 ++++++++
 3 files changed, 367 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bookkeeper/blob/59696600/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Bookie.java
----------------------------------------------------------------------
diff --git a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Bookie.java b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Bookie.java
index 12f491d..c63333f 100644
--- a/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Bookie.java
+++ b/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Bookie.java
@@ -128,8 +128,8 @@ public class Bookie extends BookieCriticalThread {
     private final LedgerDirsManager ledgerDirsManager;
     private LedgerDirsManager indexDirsManager;
     
-    private final LedgerDirsMonitor ledgerMonitor;
-    private final LedgerDirsMonitor idxMonitor;
+    LedgerDirsMonitor ledgerMonitor;
+    LedgerDirsMonitor idxMonitor;
 
     // ZooKeeper client instance for the Bookie
     ZooKeeper zk;
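
The only production change above relaxes the two monitor fields from private
final to package-private, so a test in the same package can shut down the
monitor created during Bookie startup and inject its own. A condensed sketch
of that pattern, drawn from the new BookieStorageThresholdTest below
(ThresholdTestDiskChecker is the test's own DiskChecker subclass):

    // Sketch of the injection pattern enabled by the visibility change (see the test below).
    Bookie bookie = bs.get(0).getBookie();
    bookie.ledgerMonitor.shutdown();   // stop the monitor created during Bookie startup
    LedgerDirsMonitor injected = new LedgerDirsMonitor(baseConf,
            new ThresholdTestDiskChecker(baseConf.getDiskUsageThreshold(), baseConf.getDiskUsageWarnThreshold()),
            bookie.getLedgerDirsManager());
    bookie.ledgerMonitor = injected;   // assignable now that the fields are no longer private final
    bookie.idxMonitor = injected;
    bookie.ledgerMonitor.init();
    bookie.ledgerMonitor.start();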

http://git-wip-us.apache.org/repos/asf/bookkeeper/blob/59696600/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/BookieStorageThresholdTest.java
----------------------------------------------------------------------
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/BookieStorageThresholdTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/BookieStorageThresholdTest.java
new file mode 100644
index 0000000..e9dd6ab
--- /dev/null
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/BookieStorageThresholdTest.java
@@ -0,0 +1,255 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+package org.apache.bookkeeper.bookie;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.bookkeeper.bookie.LedgerDirsManager.LedgerDirsListener;
+import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
+import org.apache.bookkeeper.client.BookKeeper.DigestType;
+import org.apache.bookkeeper.client.LedgerHandle;
+import org.apache.bookkeeper.test.BookKeeperClusterTestCase;
+import org.apache.bookkeeper.util.DiskChecker;
+import org.apache.bookkeeper.util.TestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+public class BookieStorageThresholdTest extends BookKeeperClusterTestCase {
+
+    static int NUM_BOOKIES = 1;
+    static int NUM_ENTRIES = 100;
+    static int ENTRY_SIZE = 1024;
+
+    final String msg;
+    DigestType digestType = DigestType.CRC32;
+
+    public BookieStorageThresholdTest() {
+        super(NUM_BOOKIES);
+        // a dummy message
+        StringBuilder msgSB = new StringBuilder();
+        for (int i = 0; i < ENTRY_SIZE; i++) {
+            msgSB.append("a");
+        }
+        msg = msgSB.toString();
+    }
+
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        // Set up the configuration properties needed.
+        baseConf.setEntryLogSizeLimit(NUM_ENTRIES * ENTRY_SIZE);
+        baseConf.setFlushInterval(500);
+        // set very high GC/compaction intervals, so GC/compaction is not invoked by the regular scheduler
+        baseConf.setGcWaitTime(60000);
+        baseConf.setMinorCompactionInterval(600000);
+        baseConf.setMajorCompactionInterval(700000);
+        baseConf.setEntryLogFilePreAllocationEnabled(false);
+        baseConf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());
+        // set isForceGCAllowWhenNoSpace to true, which forces GC when a disk is full (or when all disks are full)
+        baseConf.setIsForceGCAllowWhenNoSpace(true);
+        // keep DiskCheckInterval low, so the DiskChecker runs frequently
+        baseConf.setDiskCheckInterval(3000);
+
+        super.setUp();
+    }
+
+    LedgerHandle[] prepareData(int numEntryLogs) throws Exception {
+        // since an entry log file can hold at most 100 entries
+        // first ledger write 2 entries, which is less than low water mark
+        int num1 = 2;
+        // third ledger write more than high water mark entries
+        int num3 = (int) (NUM_ENTRIES * 0.7f);
+        // second ledger write remaining entries, which is higher than low water
+        // mark and less than high water mark
+        int num2 = NUM_ENTRIES - num3 - num1;
+
+        LedgerHandle[] lhs = new LedgerHandle[3];
+        for (int i = 0; i < 3; ++i) {
+            lhs[i] = bkc.createLedger(NUM_BOOKIES, NUM_BOOKIES, digestType, "".getBytes());
+        }
+
+        for (int n = 0; n < numEntryLogs; n++) {
+            for (int k = 0; k < num1; k++) {
+                lhs[0].addEntry(msg.getBytes());
+            }
+            for (int k = 0; k < num2; k++) {
+                lhs[1].addEntry(msg.getBytes());
+            }
+            for (int k = 0; k < num3; k++) {
+                lhs[2].addEntry(msg.getBytes());
+            }
+        }
+
+        return lhs;
+    }
+
+    public class ThresholdTestDiskChecker extends DiskChecker {
+
+        final AtomicBoolean injectDiskOutOfSpaceException;
+
+        public ThresholdTestDiskChecker(float threshold, float warnThreshold) {
+            super(threshold, warnThreshold);
+            injectDiskOutOfSpaceException = new AtomicBoolean();
+        }
+
+        public void setInjectDiskOutOfSpaceException(boolean setValue) {
+            injectDiskOutOfSpaceException.set(setValue);
+        }
+
+        @Override
+        public float checkDir(File dir) throws DiskErrorException, DiskOutOfSpaceException, DiskWarnThresholdException {
+            if (injectDiskOutOfSpaceException.get()) {
+                throw new DiskOutOfSpaceException("Injected DiskOutOfSpaceException",
+                        baseConf.getDiskUsageThreshold() + 2);
+            }
+            return super.checkDir(dir);
+        }
+    }
+
+    @Test(timeout = 60000)
+    public void testStorageThresholdCompaction() throws Exception {
+
+        // The BK cluster has just one bookie (NUM_BOOKIES = 1).
+        Bookie bookie = bs.get(0).getBookie();
+        // since we are going to inject our own LedgerDirsMonitor, we need to shut down
+        // the monitor that was created as part of the Bookie initialization
+        bookie.ledgerMonitor.shutdown();
+
+        LedgerDirsManager ledgerDirsManager = bookie.getLedgerDirsManager();
+
+        // flag latches
+        final CountDownLatch diskWritable = new CountDownLatch(1);
+        final CountDownLatch diskFull = new CountDownLatch(1);
+        ledgerDirsManager.addLedgerDirsListener(new LedgerDirsListener() {
+            @Override
+            public void fatalError() {
+            }
+
+            @Override
+            public void diskWritable(File disk) {
+                diskWritable.countDown();
+            }
+
+            @Override
+            public void diskJustWritable(File disk) {
+            }
+
+            @Override
+            public void diskFull(File disk) {
+                diskFull.countDown();
+            }
+
+            @Override
+            public void diskFailed(File disk) {
+            }
+
+            @Override
+            public void diskAlmostFull(File disk) {
+            }
+
+            @Override
+            public void allDisksFull() {
+            }
+        });
+
+        // Dependency Injected class
+        ThresholdTestDiskChecker thresholdTestDiskChecker = new ThresholdTestDiskChecker(
+                baseConf.getDiskUsageThreshold(), baseConf.getDiskUsageWarnThreshold());
+        LedgerDirsMonitor ledgerDirsMonitor = new LedgerDirsMonitor(baseConf, thresholdTestDiskChecker,
+                ledgerDirsManager);
+        // set the ledgermonitor and idxmonitor and initiate/start it
+        bookie.ledgerMonitor = ledgerDirsMonitor;
+        bookie.idxMonitor = ledgerDirsMonitor;
+        bookie.ledgerMonitor.init();
+        bookie.ledgerMonitor.start();
+
+        // create ledgers and add fragments
+        LedgerHandle[] lhs = prepareData(3);
+        for (LedgerHandle lh : lhs) {
+            lh.close();
+        }
+
+        // delete ledger2 and ledger3
+        bkc.deleteLedger(lhs[1].getId());
+        bkc.deleteLedger(lhs[2].getId());
+
+        // since the compaction intervals are very long, compaction cannot kick in yet,
+        // so all the entry logs (0,1,2) should still be available in the ledger directory
+        assertTrue("All the entry log files ([0,1,2].log) are not available, which is not expected: "
+                + tmpDirs.get(0).getAbsolutePath(), TestUtils.hasLogFiles(tmpDirs.get(0), false, 0, 1, 2));
+        // validating that the LedgerDirsListener callbacks have not been triggered yet
+        assertTrue("Disk Full shouldn't have been triggered yet", diskFull.getCount() == 1);
+        assertTrue("Disk writable shouldn't have been triggered yet", diskWritable.getCount() == 1);
+
+        // set exception injection to true, so that next time when checkDir of DiskChecker (ThresholdTestDiskChecker) is
+        // called it will throw DiskOutOfSpaceException
+        thresholdTestDiskChecker.setInjectDiskOutOfSpaceException(true);
+
+        // now we wait for the diskFull latch count to reach 0.
+        // we wait for the diskCheckInterval period, so that the next time LedgerDirsMonitor checks the disk usage of
+        // its directories, it will get the DiskOutOfSpaceException and hence diskFull of all LedgerDirsListeners will
+        // be called.
+        diskFull.await(baseConf.getDiskCheckInterval() + 500, TimeUnit.MILLISECONDS);
+        // verifying that diskFull of all LedgerDirsListeners is invoked, so the diskFull count should come down to 0
+        assertTrue("Disk Full should have been triggered", diskFull.getCount() == 0);
+        // making sure diskWritable of the LedgerDirsListeners has not been invoked yet
+        assertTrue("Disk writable shouldn't have been triggered yet", diskWritable.getCount() == 1);
+        // waiting momentarily, because transition to Readonly mode happens asynchronously when there are no more
+        // writableLedgerDirs
+        Thread.sleep(500);
+        assertTrue("Bookie should be transitioned to ReadOnly", bookie.isReadOnly());
+        // since we set isForceGCAllowWhenNoSpace to true, GC is forced when the disk is full (or when all disks
+        // are full).
+        // Because of getWritableLedgerDirsForNewLog, compaction is able to create a new log and compact even though
+        // there are no writableLedgerDirs
+        assertFalse(
+                "Found entry log file ([0,1,2].log. They should have been compacted" + tmpDirs.get(0).getAbsolutePath(),
+                TestUtils.hasLogFiles(tmpDirs.get(0), true, 0, 1, 2));
+
+        try {
+            ledgerDirsManager.getWritableLedgerDirs();
+            fail("It is expected that there wont be any Writable LedgerDirs and getWritableLedgerDirs is supposed to throw NoWritableLedgerDirException");
+        } catch (NoWritableLedgerDirException nowritableDirsException) {
+        }
+
+        // disable exception injection
+        thresholdTestDiskChecker.setInjectDiskOutOfSpaceException(false);
+
+        // now we wait for the diskWritable latch count to reach 0.
+        // we wait for the diskCheckInterval period, so that the next time LedgerDirsMonitor checks the disk usage of
+        // its directories, it will find a writable ledger directory and hence diskWritable of all LedgerDirsListeners
+        // will be called.
+        diskWritable.await(baseConf.getDiskCheckInterval() + 500, TimeUnit.MILLISECONDS);
+        // verifying that diskWritable of all LedgerDirsListeners is invoked, so the diskWritable count should come
+        // down to 0
+        assertTrue("Disk writable should have been triggered", diskWritable.getCount() == 0);
+        // waiting momentarily, because transition to ReadWrite mode happens asynchronously when there is new
+        // writableLedgerDirectory
+        Thread.sleep(500);
+        assertFalse("Bookie should be transitioned to ReadWrite", bookie.isReadOnly());
+    }
+}
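
To run just the new test locally, a typical Surefire invocation would be
(exact flags may vary with the build setup at this revision; the module name
is taken from the paths above):

    mvn -pl bookkeeper-server test -Dtest=BookieStorageThresholdTest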

http://git-wip-us.apache.org/repos/asf/bookkeeper/blob/59696600/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/CompactionTest.java
----------------------------------------------------------------------
diff --git a/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/CompactionTest.java b/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/CompactionTest.java
index 067b411..f6253cf 100644
--- a/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/CompactionTest.java
+++ b/bookkeeper-server/src/test/java/org/apache/bookkeeper/bookie/CompactionTest.java
@@ -31,12 +31,15 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.Collections;
 import java.util.Enumeration;
+import java.util.List;
 import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.bookkeeper.client.BookKeeper.DigestType;
 import org.apache.bookkeeper.bookie.EntryLogger.EntryLogScanner;
 import org.apache.bookkeeper.bookie.GarbageCollectorThread.CompactionScannerFactory;
+import org.apache.bookkeeper.bookie.LedgerDirsManager.NoWritableLedgerDirException;
+import org.apache.bookkeeper.client.BKException;
 import org.apache.bookkeeper.client.LedgerEntry;
 import org.apache.bookkeeper.client.LedgerHandle;
 import org.apache.bookkeeper.client.LedgerMetadata;
@@ -45,6 +48,7 @@ import org.apache.bookkeeper.conf.ServerConfiguration;
 import org.apache.bookkeeper.meta.LedgerManager;
 import org.apache.bookkeeper.meta.LedgerManagerFactory;
 import org.apache.bookkeeper.stats.NullStatsLogger;
+import org.apache.bookkeeper.proto.BookieServer;
 import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.GenericCallback;
 import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.LedgerMetadataListener;
 import org.apache.bookkeeper.proto.BookkeeperInternalCallbacks.Processor;
@@ -127,6 +131,7 @@ public class CompactionTest extends BookKeeperClusterTestCase {
         baseConf.setEntryLogFilePreAllocationEnabled(false);
         baseConf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());
         baseConf.setIsThrottleByBytes(this.isThrottleByBytes);
+        baseConf.setIsForceGCAllowWhenNoSpace(false);
 
         super.setUp();
     }
@@ -280,6 +285,111 @@ public class CompactionTest extends BookKeeperClusterTestCase {
         verifyLedger(lhs[0].getId(), 0, lhs[0].getLastAddConfirmed());
     }
 
+    @Test(timeout = 60000)
+    public void testMinorCompactionWithNoWritableLedgerDirs() throws Exception {
+        // prepare data
+        LedgerHandle[] lhs = prepareData(3, false);
+
+        for (LedgerHandle lh : lhs) {
+            lh.close();
+        }
+
+        // disable major compaction
+        baseConf.setMajorCompactionThreshold(0.0f);
+
+        // restart bookies
+        restartBookies(baseConf);
+
+        for (BookieServer bookieServer : bs) {
+            Bookie bookie = bookieServer.getBookie();
+            LedgerDirsManager ledgerDirsManager = bookie.getLedgerDirsManager();
+            List<File> ledgerDirs = ledgerDirsManager.getAllLedgerDirs();
+            // if all the disks are full then major and minor compaction will be disabled since
+            // 'isForceGCAllowWhenNoSpace' is not enabled. Check the LedgerDirsListener of InterleavedLedgerStorage.
+            for (File ledgerDir : ledgerDirs) {
+                ledgerDirsManager.addToFilledDirs(ledgerDir);
+            }
+        }
+
+        // remove ledger2 and ledger3
+        bkc.deleteLedger(lhs[1].getId());
+        bkc.deleteLedger(lhs[2].getId());
+
+        LOG.info("Finished deleting the ledgers contains most entries.");
+        Thread.sleep(baseConf.getMinorCompactionInterval() * 1000 + baseConf.getGcWaitTime());
+
+        // entry logs ([0,1,2].log) should still remain, because both major and minor compaction are disabled.
+        for (File ledgerDirectory : tmpDirs) {
+            assertTrue(
+                    "All the entry log files ([0,1,2].log) are not available, which is not expected: " + ledgerDirectory,
+                    TestUtils.hasLogFiles(ledgerDirectory, false, 0, 1, 2));
+        }
+    }
+
+    @Test(timeout = 60000)
+    public void testMinorCompactionWithNoWritableLedgerDirsButIsForceGCAllowWhenNoSpaceIsSet() throws Exception {
+        // prepare data
+        LedgerHandle[] lhs = prepareData(3, false);
+
+        for (LedgerHandle lh : lhs) {
+            lh.close();
+        }
+
+        // disable major compaction
+        baseConf.setMajorCompactionThreshold(0.0f);
+
+        // here we set isForceGCAllowWhenNoSpace to true, so major and minor compaction won't be disabled
+        // when disks are full
+        baseConf.setIsForceGCAllowWhenNoSpace(true);
+
+        // restart bookies
+        restartBookies(baseConf);
+
+        for (BookieServer bookieServer : bs) {
+            Bookie bookie = bookieServer.getBookie();
+            LedgerDirsManager ledgerDirsManager = bookie.getLedgerDirsManager();
+            List<File> ledgerDirs = ledgerDirsManager.getAllLedgerDirs();
+            // Major and minor compaction are not disabled even though disks are full. Check the LedgerDirsListener of
+            // InterleavedLedgerStorage.
+            for (File ledgerDir : ledgerDirs) {
+                ledgerDirsManager.addToFilledDirs(ledgerDir);
+            }
+        }
+
+        // remove ledger2 and ledger3
+        bkc.deleteLedger(lhs[1].getId());
+        bkc.deleteLedger(lhs[2].getId());
+
+        LOG.info("Finished deleting the ledgers contains most entries.");
+        Thread.sleep(baseConf.getMinorCompactionInterval() * 1000 + baseConf.getGcWaitTime() + 500);
+
+        // though all disks are added to the filled dirs list, compaction will succeed, because EntryLogger
+        // allocates a new log via
+        // ledgerDirsManager.getWritableLedgerDirsForNewLog() instead of getWritableLedgerDirs(), so the
+        // entry logs ([0,1,2].log) should be compacted.
+        for (File ledgerDirectory : tmpDirs) {
+            assertFalse("Found entry log file ([0,1,2].log that should have not been compacted in ledgerDirectory: "
+                    + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0, 1, 2));
+        }
+
+        // even though the entry log files are removed, we can still access the entries of ledger1
+        // since those entries have been compacted into a new entry log
+        verifyLedger(lhs[0].getId(), 0, lhs[0].getLastAddConfirmed());
+
+        // for the sake of validity of test lets make sure that there is no writableLedgerDir in the bookies
+        for (BookieServer bookieServer : bs) {
+            Bookie bookie = bookieServer.getBookie();
+            LedgerDirsManager ledgerDirsManager = bookie.getLedgerDirsManager();
+            try {
+                List<File> ledgerDirs = ledgerDirsManager.getWritableLedgerDirs();
+                // it is expected not to have any writableLedgerDirs since we added all of them to FilledDirs
+                fail("It is expected not to have any writableLedgerDirs");
+            } catch (NoWritableLedgerDirException nwe) {
+
+            }
+        }
+    }
+    
     @Test(timeout=60000)
     public void testMajorCompaction() throws Exception {