You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by kr...@apache.org on 2019/03/08 13:31:59 UTC

[lucene-solr] branch jira/solr-13307 created (now f781877)

This is an automated email from the ASF dual-hosted git repository.

krisden pushed a change to branch jira/solr-13307
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


      at f781877  SOLR-13307: Ensure HDFS tests clear System properties they set

This branch includes the following new commits:

     new f781877  SOLR-13307: Ensure HDFS tests clear System properties they set

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[lucene-solr] 01/01: SOLR-13307: Ensure HDFS tests clear System properties they set

Posted by kr...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

krisden pushed a commit to branch jira/solr-13307
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit f78187783a18694af2dade46630612bc90e2d3fb
Author: Kevin Risden <kr...@apache.org>
AuthorDate: Fri Mar 8 08:30:46 2019 -0500

    SOLR-13307: Ensure HDFS tests clear System properties they set
    
    Signed-off-by: Kevin Risden <kr...@apache.org>
---
 .../solr/cloud/MoveReplicaHDFSFailoverTest.java    |  15 +-
 .../org/apache/solr/cloud/MoveReplicaHDFSTest.java |   4 +-
 .../collections/TestHdfsCloudBackupRestore.java    |  20 +-
 .../solr/cloud/hdfs/HDFSCollectionsAPITest.java    |  14 +-
 .../cloud/hdfs/HdfsBasicDistributedZkTest.java     |  13 +-
 .../hdfs/HdfsChaosMonkeyNothingIsSafeTest.java     |  14 +-
 .../cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java  |  12 +-
 .../apache/solr/cloud/hdfs/HdfsNNFailoverTest.java |  11 +-
 .../apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java |  14 +-
 .../cloud/hdfs/HdfsRestartWhileUpdatingTest.java   |  12 +-
 .../org/apache/solr/cloud/hdfs/HdfsTestUtil.java   |   3 +-
 .../HdfsTlogReplayBufferedWhileIndexingTest.java   |  19 +-
 .../cloud/hdfs/HdfsUnloadDistributedZkTest.java    |  10 +-
 .../hdfs/HdfsWriteToMultipleCollectionsTest.java   |  12 +-
 .../org/apache/solr/cloud/hdfs/StressHdfsTest.java |  36 ++-
 .../apache/solr/core/HdfsDirectoryFactoryTest.java | 251 ++++++++++-----------
 .../solr/handler/TestHdfsBackupRestoreCore.java    |  16 +-
 .../org/apache/solr/search/TestRecoveryHdfs.java   |  52 ++---
 .../org/apache/solr/update/TestHdfsUpdateLog.java  |  31 ++-
 19 files changed, 269 insertions(+), 290 deletions(-)

diff --git a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java
index f6e9068..2a26956 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSFailoverTest.java
@@ -63,9 +63,17 @@ public class MoveReplicaHDFSFailoverTest extends SolrCloudTestCase {
 
   @AfterClass
   public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      shutdownCluster();
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        System.clearProperty("solr.hdfs.home");
+        System.clearProperty("solr.hdfs.blockcache.enabled");
+      }
+    }
   }
 
   @Test
@@ -204,5 +212,4 @@ public class MoveReplicaHDFSFailoverTest extends SolrCloudTestCase {
       solrClient.add(collection, doc);
     }
   }
-
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
index b464341..9678c2e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MoveReplicaHDFSTest.java
@@ -50,8 +50,8 @@ public class MoveReplicaHDFSTest extends MoveReplicaTest {
       HdfsTestUtil.teardownClass(dfsCluster);
     } finally {
       dfsCluster = null;
-      System.setProperty("solr.hdfs.blockcache.blocksperbank", "512");
-      System.setProperty("tests.hdfs.numdatanodes", "1");
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+      System.clearProperty("tests.hdfs.numdatanodes");
     }
   }
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
index c796394..a9901e8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java
@@ -147,14 +147,17 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCa
 
   @AfterClass
   public static void teardownClass() throws Exception {
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty("solr.hdfs.default.backup.path");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.home");
+      System.clearProperty("solr.hdfs.default.backup.path");
+      System.clearProperty("test.build.data");
+      System.clearProperty("test.cache.data");
+    }
   }
 
   @Override
@@ -211,11 +214,10 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCa
       assertTrue(expected.contains(d));
     }
   }
+
   @Override
   @Test
-  // commented 15-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
   public void test() throws Exception {
     super.test();
   }
-
-  }
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java
index 425f4a9..c06b28a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HDFSCollectionsAPITest.java
@@ -55,9 +55,17 @@ public class HDFSCollectionsAPITest extends SolrCloudTestCase {
 
   @AfterClass
   public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      shutdownCluster(); // need to close before the MiniDFSCluster
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        System.clearProperty("solr.hdfs.blockcache.enabled");
+        System.clearProperty("solr.hdfs.home");
+      }
+    }
   }
 
   public void testDataDirIsNotReused() throws Exception {
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
index 380916a..59293c6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
@@ -28,13 +28,11 @@ import org.junit.BeforeClass;
 import com.carrotsearch.randomizedtesting.annotations.Nightly;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 
-
 @Slow
 @Nightly
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented 20-July-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
   private static MiniDFSCluster dfsCluster;
   
@@ -51,15 +49,16 @@ public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
 
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    System.clearProperty("tests.hdfs.numdatanodes");
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("tests.hdfs.numdatanodes");
+    }
   }
-
   
   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
-  
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
index 076c678..3bdf77a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeyNothingIsSafeTest.java
@@ -33,20 +33,23 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028, https://issues.apache.org/jira/browse/SOLR-10191")
 public class HdfsChaosMonkeyNothingIsSafeTest extends ChaosMonkeyNothingIsSafeTest {
   private static MiniDFSCluster dfsCluster;
   
   @BeforeClass
   public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
     System.setProperty("solr.hdfs.blockcache.global", "true"); // always use global cache, this test can create a lot of directories
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
   }
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.global");
+    }
   }
   
   @Override
@@ -56,12 +59,9 @@ public class HdfsChaosMonkeyNothingIsSafeTest extends ChaosMonkeyNothingIsSafeTe
     // super class may hard code directory
     useFactory("org.apache.solr.core.HdfsDirectoryFactory");
   }
-
   
   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
-
-
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
index 517d932..ab5eb33 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
@@ -33,7 +33,6 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
 public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
   private static MiniDFSCluster dfsCluster;
   
@@ -45,8 +44,12 @@ public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.global");
+    }
   }
   
   @Override
@@ -56,12 +59,9 @@ public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
     // super class may hard code directory
     useFactory("org.apache.solr.core.HdfsDirectoryFactory");
   }
-
   
   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
-
-
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
index de92763..b793268 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java
@@ -35,10 +35,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 })
 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class HdfsNNFailoverTest extends BasicDistributedZkTest {
-
   private static final String COLLECTION = "collection";
   private static MiniDFSCluster dfsCluster;
-
   
   @BeforeClass
   public static void setupClass() throws Exception {
@@ -47,8 +45,11 @@ public class HdfsNNFailoverTest extends BasicDistributedZkTest {
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }
   
   @Override
@@ -61,7 +62,7 @@ public class HdfsNNFailoverTest extends BasicDistributedZkTest {
     sliceCount = 1;
     fixShardCount(TEST_NIGHTLY ? 7 : random().nextInt(2) + 1);
   }
-  
+
   protected String getSolrXml() {
     return "solr.xml";
   }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
index 876b5ee..b881292 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
@@ -51,9 +51,17 @@ public class HdfsRecoveryZkTest extends RecoveryZkTest {
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    cluster.shutdown(); // need to close before the MiniDFSCluster
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      shutdownCluster(); // need to close before the MiniDFSCluster
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+        System.clearProperty("solr.hdfs.home");
+      }
+    }
   }
 
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
index 3779dd7..02a7943 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRestartWhileUpdatingTest.java
@@ -33,9 +33,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// 12-Jun-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") //2018-03-10
 public class HdfsRestartWhileUpdatingTest extends RestartWhileUpdatingTest {
-  
   public HdfsRestartWhileUpdatingTest() throws Exception {
     super();
   }
@@ -50,14 +48,16 @@ public class HdfsRestartWhileUpdatingTest extends RestartWhileUpdatingTest {
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    }
   }
-
   
   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
-
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
index c9771e5..d601565 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
@@ -113,7 +113,8 @@ public class HdfsTestUtil {
     System.setProperty("test.cache.data", dir + File.separator + "hdfs" + File.separator + "cache");
     System.setProperty("solr.lock.type", DirectoryFactory.LOCK_TYPE_HDFS);
 
-    System.setProperty("solr.hdfs.blockcache.global", Boolean.toString(LuceneTestCase.random().nextBoolean()));
+    System.setProperty("solr.hdfs.blockcache.global",
+        System.getProperty("solr.hdfs.blockcache.global", Boolean.toString(LuceneTestCase.random().nextBoolean())));
 
     final MiniDFSCluster dfsCluster;
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
index 4986090..52b3a82 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTlogReplayBufferedWhileIndexingTest.java
@@ -30,35 +30,34 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 
 @Slow
 @Nightly
-// 12-Jun-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 public class HdfsTlogReplayBufferedWhileIndexingTest extends TlogReplayBufferedWhileIndexingTest {
-  
+  private static MiniDFSCluster dfsCluster;
+
   public HdfsTlogReplayBufferedWhileIndexingTest() throws Exception {
     super();
   }
 
-  private static MiniDFSCluster dfsCluster;
-  
   @BeforeClass
   public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
     System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath());
   }
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    }
   }
-
   
   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
-
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
index 117ec61..c15b8c3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
@@ -33,7 +33,6 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-//Commented  4-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
 public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
   private static MiniDFSCluster dfsCluster;
   
@@ -44,14 +43,15 @@ public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }
-
   
   @Override
   protected String getDataDir(String dataDir) throws IOException {
     return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
   }
-
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
index 1905874..6f48b98 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
@@ -57,10 +57,7 @@ import org.junit.Test;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-//Commented  4-Oct-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
-  private static final String SOLR_HDFS_HOME = "solr.hdfs.home";
-  private static final String SOLR_HDFS_BLOCKCACHE_GLOBAL = "solr.hdfs.blockcache.global";
   private static final String ACOLLECTION = "acollection";
   private static MiniDFSCluster dfsCluster;
   
@@ -72,8 +69,11 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }
   
   @Override
@@ -168,7 +168,7 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
             BlockCache blockCache = ((BlockDirectoryCache) cache)
                 .getBlockCache();
             if (lastBlockCache != null) {
-              if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
+              if (Boolean.getBoolean("solr.hdfs.blockcache.global")) {
                 assertEquals(lastBlockCache, blockCache);
               } else {
                 assertNotSame(lastBlockCache, blockCache);
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
index b8aa53a..b3d5e27 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
@@ -24,11 +24,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.BasicDistributedZkTest;
@@ -42,14 +40,12 @@ import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.util.BadHdfsThreadsFilter;
 import org.apache.solr.util.TimeOut;
-import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import java.io.IOException;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -58,16 +54,13 @@ import java.util.TimerTask;
 import java.util.concurrent.TimeUnit;
 
 @Slow
+@Nightly
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
-@Nightly
 public class StressHdfsTest extends BasicDistributedZkTest {
-
   private static final String DELETE_DATA_DIR_COLLECTION = "delete_data_dir";
   private static MiniDFSCluster dfsCluster;
-  
 
   private boolean testRestartIntoSafeMode;
   
@@ -78,8 +71,11 @@ public class StressHdfsTest extends BasicDistributedZkTest {
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+    }
   }
   
   @Override
@@ -93,13 +89,12 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     fixShardCount(TEST_NIGHTLY ? 7 : random().nextInt(2) + 1);
     testRestartIntoSafeMode = random().nextBoolean();
   }
-  
+
   protected String getSolrXml() {
     return "solr.xml";
   }
 
   @Test
-  //2018-06-18 (commented) @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
   public void test() throws Exception {
     randomlyEnableAutoSoftCommit();
     
@@ -121,7 +116,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
         // enter safe mode and restart a node
         NameNodeAdapter.enterSafeMode(dfsCluster.getNameNode(), false);
         
-        int rnd = LuceneTestCase.random().nextInt(10000);
+        int rnd = random().nextInt(10000);
         
         timer.schedule(new TimerTask() {
           
@@ -140,10 +135,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     }
   }
 
-  private void createAndDeleteCollection() throws SolrServerException,
-      IOException, Exception, KeeperException, InterruptedException,
-      URISyntaxException {
-    
+  private void createAndDeleteCollection() throws Exception {
     boolean overshard = random().nextBoolean();
     int rep;
     int nShards;
@@ -238,11 +230,11 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     for (String dataDir : dataDirs) {
       Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
       conf.setBoolean("fs.hdfs.impl.disable.cache", true);
-      FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf);
-      assertFalse(
-          "Data directory exists after collection removal : " + dataDir,
-          fs.exists(new Path(dataDir)));
-      fs.close();
+      try(FileSystem fs = FileSystem.get(new URI(HdfsTestUtil.getURI(dfsCluster)), conf)) {
+        assertFalse(
+            "Data directory exists after collection removal : " + dataDir,
+            fs.exists(new Path(dataDir)));
+      }
     }
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
index 2ea85fd..c599fb7 100644
--- a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java
@@ -51,105 +51,96 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {
-  
   private static MiniDFSCluster dfsCluster;
   
   @BeforeClass
   public static void setupClass() throws Exception {
-    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath(), false);
     System.setProperty("solr.hdfs.blockcache.blocksperbank", "1024");
+    dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath(), false);
   }
   
   @AfterClass
   public static void teardownClass() throws Exception {
-    HdfsTestUtil.teardownClass(dfsCluster);
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
-    System.clearProperty("solr.hdfs.blockcache.blocksperbank");
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.home");
+      System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
+      System.clearProperty("solr.hdfs.blockcache.blocksperbank");
+    }
   }
 
   @Test
   public void testInitArgsOrSysPropConfig() throws Exception {
-    
-    HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory();
-    
-    // test sys prop config
-    
-    System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
-    hdfsFactory.init(new NamedList<>());
-    String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
-
-    assertTrue(dataHome.endsWith("/solr1/mock/data"));
-    
-    System.clearProperty("solr.hdfs.home");
-    
-    // test init args config
-    
-    NamedList<Object> nl = new NamedList<>();
-    nl.add("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr2");
-    hdfsFactory.init(nl);
-    dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
-
-    assertTrue(dataHome.endsWith("/solr2/mock/data"));
-    
-    // test sys prop and init args config - init args wins
-    
-    System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
-    hdfsFactory.init(nl);
-    dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
-
-    assertTrue(dataHome.endsWith("/solr2/mock/data"));
-    
-    System.clearProperty("solr.hdfs.home");
-    
-    
-    // set conf dir by sys prop
-    
-    Path confDir = createTempDir();
-    
-    System.setProperty(HdfsDirectoryFactory.CONFIG_DIRECTORY, confDir.toString());
-    
-    Directory dir = hdfsFactory.create(HdfsTestUtil.getURI(dfsCluster) + "/solr", NoLockFactory.INSTANCE, DirContext.DEFAULT);
-    try {
-      assertEquals(confDir.toString(), hdfsFactory.getConfDir());
-    } finally {
-      dir.close();
+    try(HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) {
+
+      // test sys prop config
+      System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
+      hdfsFactory.init(new NamedList<>());
+      String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
+
+      assertTrue(dataHome.endsWith("/solr1/mock/data"));
+
+      System.clearProperty("solr.hdfs.home");
+
+      // test init args config
+      NamedList<Object> nl = new NamedList<>();
+      nl.add("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr2");
+      hdfsFactory.init(nl);
+      dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
+
+      assertTrue(dataHome.endsWith("/solr2/mock/data"));
+
+      // test sys prop and init args config - init args wins
+      System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
+      hdfsFactory.init(nl);
+      dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
+
+      assertTrue(dataHome.endsWith("/solr2/mock/data"));
+
+      System.clearProperty("solr.hdfs.home");
+
+      // set conf dir by sys prop
+      Path confDir = createTempDir();
+
+      System.setProperty(HdfsDirectoryFactory.CONFIG_DIRECTORY, confDir.toString());
+
+      try (Directory dir = hdfsFactory
+          .create(HdfsTestUtil.getURI(dfsCluster) + "/solr", NoLockFactory.INSTANCE, DirContext.DEFAULT)) {
+        assertEquals(confDir.toString(), hdfsFactory.getConfDir());
+      }
+
+      // check bool and int getConf impls
+      nl = new NamedList<>();
+      nl.add(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 4);
+      System.setProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, "3");
+      nl.add(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, true);
+      System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
+
+      hdfsFactory.init(nl);
+
+      assertEquals(4, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
+      assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
+
+      nl = new NamedList<>();
+      hdfsFactory.init(nl);
+      System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "true");
+
+      assertEquals(3, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
+      assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
+
+      System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
+      System.clearProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED);
+
+      assertEquals(0, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
+      assertFalse(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
     }
-    
-    // check bool and int getConf impls
-    nl = new NamedList<>();
-    nl.add(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 4);
-    System.setProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, "3");
-    nl.add(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, true);
-    System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
-    
-    hdfsFactory.init(nl);
-    
-    assertEquals(4, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
-    assertEquals(true, hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
-    
-    nl = new NamedList<>();
-    hdfsFactory.init(nl);
-    System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "true");
-    
-    assertEquals(3, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
-    assertEquals(true, hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
-    
-    System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
-    System.clearProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED);
-    
-    assertEquals(0, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
-    assertEquals(false, hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
-    
-    hdfsFactory.close();
   }
 
   @Test
   public void testCleanupOldIndexDirectories() throws Exception {
-
     try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) {
-
       System.setProperty("solr.hdfs.home", HdfsTestUtil.getURI(dfsCluster) + "/solr1");
       hdfsFactory.init(new NamedList<>());
       String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
@@ -183,56 +174,56 @@ public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 {
     conf.set("dfs.permissions.enabled", "false");
 
     Random r = random();
-    HdfsDirectoryFactory factory = new HdfsDirectoryFactory();
-    SolrMetricManager metricManager = new SolrMetricManager();
-    String registry = TestUtil.randomSimpleString(r, 2, 10);
-    String scope = TestUtil.randomSimpleString(r,2, 10);
-    Map<String,String> props = new HashMap<String,String>();
-    props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
-    props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
-    props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
-    props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true");
-    factory.init(new NamedList<>(props));
-    factory.initializeMetrics(metricManager, registry, "foo", scope);
-
-    // get the metrics map for the locality bean
-    MetricsMap metrics = (MetricsMap)((SolrMetricManager.GaugeWrapper)metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality")).getGauge();
-    // We haven't done anything, so there should be no data
-    Map<String,Object> statistics = metrics.getValue();
-    assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0l,
-        statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
-    assertEquals(
-        "Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0,
-        statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO));
-    
-    // create a directory and a file
-    String path = HdfsTestUtil.getURI(dfsCluster) + "/solr3/";
-    Directory dir = factory.create(path, NoLockFactory.INSTANCE, DirContext.DEFAULT);
-    try(IndexOutput writer = dir.createOutput("output", null)) {
-      writer.writeLong(42l);
+    try(HdfsDirectoryFactory factory = new HdfsDirectoryFactory()) {
+      SolrMetricManager metricManager = new SolrMetricManager();
+      String registry = TestUtil.randomSimpleString(r, 2, 10);
+      String scope = TestUtil.randomSimpleString(r, 2, 10);
+      Map<String, String> props = new HashMap<String, String>();
+      props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
+      props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
+      props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
+      props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true");
+      factory.init(new NamedList<>(props));
+      factory.initializeMetrics(metricManager, registry, "foo", scope);
+
+      // get the metrics map for the locality bean
+      MetricsMap metrics = (MetricsMap) ((SolrMetricManager.GaugeWrapper) metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality")).getGauge();
+      // We haven't done anything, so there should be no data
+      Map<String, Object> statistics = metrics.getValue();
+      assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0l,
+          statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
+      assertEquals(
+          "Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0,
+          statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO));
+
+      // create a directory and a file
+      String path = HdfsTestUtil.getURI(dfsCluster) + "/solr3/";
+      try (Directory dir = factory.create(path, NoLockFactory.INSTANCE, DirContext.DEFAULT)) {
+        try (IndexOutput writer = dir.createOutput("output", null)) {
+          writer.writeLong(42L);
+        }
+
+        final long long_bytes = Long.SIZE / Byte.SIZE;
+
+        // no locality because hostname not set
+        factory.setHost("bogus");
+        statistics = metrics.getValue();
+        assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL),
+            long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
+        assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL),
+            1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL));
+        assertEquals(
+            "Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL),
+            0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL));
+
+        // set hostname and check again
+        factory.setHost("127.0.0.1");
+        statistics = metrics.getValue();
+        assertEquals(
+            "Did not count block as local after setting hostname: "
+                + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL),
+            long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL));
+      }
     }
-    
-    final long long_bytes = Long.SIZE / Byte.SIZE;
-    
-    // no locality because hostname not set
-    factory.setHost("bogus");
-    statistics = metrics.getValue();
-    assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL),
-        long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
-    assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL),
-        1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL));
-    assertEquals(
-        "Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL),
-        0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL));
-        
-    // set hostname and check again
-    factory.setHost("127.0.0.1");
-    statistics = metrics.getValue();
-    assertEquals(
-        "Did not count block as local after setting hostname: "
-            + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL),
-        long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL));
-        
-    factory.close();
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java b/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
index 038e451..60f77b4 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java
@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.solr.handler;
 
 import java.io.IOException;
@@ -145,14 +144,17 @@ public class TestHdfsBackupRestoreCore extends SolrCloudTestCase {
 
   @AfterClass
   public static void teardownClass() throws Exception {
-    System.clearProperty("solr.hdfs.home");
-    System.clearProperty("solr.hdfs.default.backup.path");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    dfsCluster = null;
+    try {
+      HdfsTestUtil.teardownClass(dfsCluster);
+    } finally {
+      dfsCluster = null;
+      System.clearProperty("solr.hdfs.home");
+      System.clearProperty("solr.hdfs.default.backup.path");
+      System.clearProperty("test.build.data");
+      System.clearProperty("test.cache.data");
+    }
   }
 
   @Test
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
index 75f3012..33caf74 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
@@ -16,7 +16,6 @@
  */
 package org.apache.solr.search;
 
-
 import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
 
 import java.io.IOException;
@@ -65,18 +64,14 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
 // TODO: longer term this should be combined with TestRecovery somehow ??
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
 public class TestRecoveryHdfs extends SolrTestCaseJ4 {
-
   // means that we've seen the leader and have version info (i.e. we are a non-leader replica)
   private static String FROM_LEADER = DistribPhase.FROMLEADER.toString(); 
 
   private static int timeout=60;  // acquire timeout in seconds.  change this to a huge number when debugging to prevent threads from advancing.
   
   private static MiniDFSCluster dfsCluster;
-
   private static String hdfsUri;
-  
   private static FileSystem fs;
   
   @BeforeClass
@@ -100,16 +95,21 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
   
   @AfterClass
   public static void afterClass() throws Exception {
-    System.clearProperty("solr.ulog.dir");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
-    deleteCore();
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    
-    hdfsDataDir = null;
-    dfsCluster = null;
+    try {
+      deleteCore();
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        dfsCluster = null;
+        hdfsDataDir = null;
+        System.clearProperty("solr.ulog.dir");
+        System.clearProperty("test.build.data");
+        System.clearProperty("test.cache.data");
+      }
+    }
   }
 
   @Test
@@ -135,11 +135,9 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     assertTrue("Expected to find tlogs with a replication factor of 2", foundRep2);
   }
   
-  
   @Test
   public void testLogReplay() throws Exception {
     try {
-
       DirectUpdateHandler2.commitOnClose = false;
       final Semaphore logReplay = new Semaphore(0);
       final Semaphore logReplayFinish = new Semaphore(0);
@@ -154,7 +152,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
 
       UpdateLog.testing_logReplayFinishHook = logReplayFinish::release;
 
-
       clearIndex();
       assertU(commit());
 
@@ -230,12 +227,10 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
       UpdateLog.testing_logReplayHook = null;
       UpdateLog.testing_logReplayFinishHook = null;
     }
-
   }
 
   @Test
   public void testBuffering() throws Exception {
-
     DirectUpdateHandler2.commitOnClose = false;
     final Semaphore logReplay = new Semaphore(0);
     final Semaphore logReplayFinish = new Semaphore(0);
@@ -250,7 +245,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
 
     UpdateLog.testing_logReplayFinishHook = logReplayFinish::release;
 
-
     SolrQueryRequest req = req();
     UpdateHandler uhandler = req.getCore().getUpdateHandler();
     UpdateLog ulog = uhandler.getUpdateLog();
@@ -383,14 +377,11 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
 
       req().close();
     }
-
   }
 
-
   @Test
   @Ignore("HDFS-3107: no truncate support yet")
   public void testDropBuffered() throws Exception {
-
     DirectUpdateHandler2.commitOnClose = false;
     final Semaphore logReplay = new Semaphore(0);
     final Semaphore logReplayFinish = new Semaphore(0);
@@ -499,12 +490,8 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
           +"]"
       );
 
-
       updateJ(jsonAdd(sdoc("id","C2", "_version_","302")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
 
-
-
-
       assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); // leave each test method in a good state
     } finally {
       DirectUpdateHandler2.commitOnClose = true;
@@ -513,13 +500,10 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
 
       req().close();
     }
-
   }
 
-
   @Test
   public void testExistOldBufferLog() throws Exception {
-
     DirectUpdateHandler2.commitOnClose = false;
 
     SolrQueryRequest req = req();
@@ -587,11 +571,8 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
 
       req().close();
     }
-
   }
 
-
-
   // make sure that on a restart, versions don't start too low
   @Test
   public void testVersionsOnRestart() throws Exception {
@@ -616,7 +597,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     assertJQ(req("qt","/get", "getVersions","2")
         ,"/versions==[" + v2 + "," + v1a + "]"
     );
-
   }
 
   // make sure that log isn't needlessly replayed after a clean close
@@ -636,7 +616,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
 
     UpdateLog.testing_logReplayFinishHook = () -> logReplayFinish.release();
 
-
     SolrQueryRequest req = req();
     UpdateHandler uhandler = req.getCore().getUpdateHandler();
     UpdateLog ulog = uhandler.getUpdateLog();
@@ -669,7 +648,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     }
   }
   
-  
   private void addDocs(int nDocs, int start, LinkedList<Long> versions) throws Exception {
     for (int i=0; i<nDocs; i++) {
       versions.addFirst( addAndGetVersion( sdoc("id",Integer.toString(start + nDocs)) , null) );
@@ -783,7 +761,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
   // test that a partially written last tlog entry (that will cause problems for both reverse reading and for
   // log replay) doesn't stop us from coming up, and from recovering the documents that were not cut off.
   //
-
   @Test
   public void testTruncatedLog() throws Exception {
     try {
@@ -850,7 +827,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     }
   }
 
-
   //
   // test that a corrupt tlog doesn't stop us from coming up
   //
@@ -912,8 +888,6 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     }
   }
 
-
-
   // in rare circumstances, two logs can be left uncapped (lacking a commit at the end signifying that all the content in the log was committed)
   @Test
   public void testRecoveryMultipleLogs() throws Exception {
diff --git a/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java b/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
index 62bd1d8..cfdfef5 100644
--- a/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
+++ b/solr/core/src/test/org/apache/solr/update/TestHdfsUpdateLog.java
@@ -37,13 +37,9 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 @ThreadLeakFilters(defaultFilters = true, filters = {
     BadHdfsThreadsFilter.class // hdfs currently leaks thread(s)
 })
-// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 23-Aug-2018
 public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
-  
   private static MiniDFSCluster dfsCluster;
-
   private static String hdfsUri;
-  
   private static FileSystem fs;
   
   @BeforeClass
@@ -69,22 +65,25 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
   
   @AfterClass
   public static void afterClass() throws Exception {
-    System.clearProperty("solr.ulog.dir");
-    System.clearProperty("test.build.data");
-    System.clearProperty("test.cache.data");
-    deleteCore();
     IOUtils.closeQuietly(fs);
     fs = null;
-    HdfsTestUtil.teardownClass(dfsCluster);
-    
-    hdfsDataDir = null;
-    dfsCluster = null;
+    try {
+      deleteCore();
+    } finally {
+      try {
+        HdfsTestUtil.teardownClass(dfsCluster);
+      } finally {
+        hdfsDataDir = null;
+        dfsCluster = null;
+        System.clearProperty("solr.ulog.dir");
+        System.clearProperty("test.build.data");
+        System.clearProperty("test.cache.data");
+      }
+    }
   }
 
   @Test
-  //28-June-2018  @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
   public void testFSThreadSafety() throws Exception {
-
     final SolrQueryRequest req = req();
     final UpdateHandler uhandler = req.getCore().getUpdateHandler();
     ((DirectUpdateHandler2) uhandler).getCommitTracker().setTimeUpperBound(100);
@@ -131,15 +130,11 @@ public class TestHdfsUpdateLog extends SolrTestCaseJ4 {
         }
       }
     };
-    
-
 
     thread.start();
     thread2.start();
     thread.join();
     thread2.join();
-    
   }
-
 }