You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@geode.apache.org by ji...@apache.org on 2018/01/10 21:57:05 UTC

[geode] branch develop updated: GEODE-3539: rule cleanup (#1242)

This is an automated email from the ASF dual-hosted git repository.

jinmeiliao pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/develop by this push:
     new 48d93ed  GEODE-3539: rule cleanup (#1242)
48d93ed is described below

commit 48d93ed75f258434d09ae869547352173d6a9015
Author: jinmeiliao <ji...@pivotal.io>
AuthorDate: Wed Jan 10 13:57:01 2018 -0800

    GEODE-3539: rule cleanup (#1242)
    
    * add javadoc to explain the issue of with useTempWorkingDir
    * properly cleanup all residue files in the working dir
    * fix tests that do not need to use a tempWorkingDir
---
 .../geode/connectors/jdbc/JdbcDUnitTest.java       | 34 ++++++++++------------
 .../internal/cli/commands/ExportLogsDUnitTest.java |  5 ++--
 .../geode/test/dunit/rules/ClusterStartupRule.java | 10 +++++--
 .../apache/geode/test/dunit/rules/MemberVM.java    |  7 ++---
 4 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/geode-connectors/src/test/java/org/apache/geode/connectors/jdbc/JdbcDUnitTest.java b/geode-connectors/src/test/java/org/apache/geode/connectors/jdbc/JdbcDUnitTest.java
index d571ecb..ea1878f 100644
--- a/geode-connectors/src/test/java/org/apache/geode/connectors/jdbc/JdbcDUnitTest.java
+++ b/geode-connectors/src/test/java/org/apache/geode/connectors/jdbc/JdbcDUnitTest.java
@@ -32,7 +32,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.Region;
 import org.apache.geode.pdx.PdxInstance;
 import org.apache.geode.test.dunit.IgnoredException;
@@ -84,7 +83,6 @@ public class JdbcDUnitTest implements Serializable {
   @After
   public void tearDown() throws Exception {
     server.invoke(() -> {
-      CacheFactory.getAnyInstance().close();
       closeDB();
     });
   }
@@ -112,9 +110,9 @@ public class JdbcDUnitTest implements Serializable {
 
     server.invoke(() -> {
       PdxInstance pdxEmployee1 =
-          CacheFactory.getAnyInstance().createPdxInstanceFactory(Employee.class.getName())
+          ClusterStartupRule.getCache().createPdxInstanceFactory(Employee.class.getName())
               .writeString("name", "Emp1").writeInt("age", 55).create();
-      Region region = CacheFactory.getAnyInstance().getRegion(REGION_NAME);
+      Region region = ClusterStartupRule.getCache().getRegion(REGION_NAME);
       assertThatThrownBy(() -> region.put("key1", pdxEmployee1))
           .isExactlyInstanceOf(IllegalStateException.class).hasMessage(
               "JDBC mapping for region employees not found. Create the mapping with the gfsh command 'create jdbc-mapping'.");
@@ -129,12 +127,12 @@ public class JdbcDUnitTest implements Serializable {
 
     server.invoke(() -> {
       PdxInstance pdxEmployee1 =
-          CacheFactory.getAnyInstance().createPdxInstanceFactory(Employee.class.getName())
+          ClusterStartupRule.getCache().createPdxInstanceFactory(Employee.class.getName())
               .writeString("name", "Emp1").writeInt("age", 55).create();
-      Region region = CacheFactory.getAnyInstance().getRegion(REGION_NAME);
+      Region region = ClusterStartupRule.getCache().getRegion(REGION_NAME);
       region.put("key1", pdxEmployee1);
 
-      JdbcAsyncWriter asyncWriter = (JdbcAsyncWriter) CacheFactory.getAnyInstance()
+      JdbcAsyncWriter asyncWriter = (JdbcAsyncWriter) ClusterStartupRule.getCache()
           .getAsyncEventQueue("JAW").getAsyncEventListener();
       Awaitility.await().atMost(30, TimeUnit.SECONDS).until(() -> {
         assertThat(asyncWriter.getFailedEvents()).isEqualTo(1);
@@ -151,9 +149,9 @@ public class JdbcDUnitTest implements Serializable {
 
     server.invoke(() -> {
       PdxInstance pdxEmployee1 =
-          CacheFactory.getAnyInstance().createPdxInstanceFactory(Employee.class.getName())
+          ClusterStartupRule.getCache().createPdxInstanceFactory(Employee.class.getName())
               .writeString("name", "Emp1").writeInt("age", 55).create();
-      Region region = CacheFactory.getAnyInstance().getRegion(REGION_NAME);
+      Region region = ClusterStartupRule.getCache().getRegion(REGION_NAME);
       assertThatThrownBy(() -> region.put("key1", pdxEmployee1))
           .isExactlyInstanceOf(IllegalStateException.class).hasMessage(
               "JDBC mapping for region employees not found. Create the mapping with the gfsh command 'create jdbc-mapping'.");
@@ -167,9 +165,9 @@ public class JdbcDUnitTest implements Serializable {
 
     server.invoke(() -> {
       PdxInstance pdxEmployee1 =
-          CacheFactory.getAnyInstance().createPdxInstanceFactory(Employee.class.getName())
+          ClusterStartupRule.getCache().createPdxInstanceFactory(Employee.class.getName())
               .writeString("name", "Emp1").writeInt("age", 55).create();
-      Region region = CacheFactory.getAnyInstance().getRegion(REGION_NAME);
+      Region region = ClusterStartupRule.getCache().getRegion(REGION_NAME);
       assertThatThrownBy(() -> region.put("key1", pdxEmployee1))
           .isExactlyInstanceOf(IllegalStateException.class).hasMessage(
               "JDBC connection with name TestConnection not found. Create the connection with the gfsh command 'create jdbc-connection'");
@@ -183,11 +181,11 @@ public class JdbcDUnitTest implements Serializable {
     createMapping(REGION_NAME, CONNECTION_NAME);
     server.invoke(() -> {
       PdxInstance pdxEmployee1 =
-          CacheFactory.getAnyInstance().createPdxInstanceFactory(Employee.class.getName())
+          ClusterStartupRule.getCache().createPdxInstanceFactory(Employee.class.getName())
               .writeString("id", "key1").writeString("name", "Emp1").writeInt("age", 55).create();
 
       String key = "emp1";
-      CacheFactory.getAnyInstance().getRegion(REGION_NAME).put(key, pdxEmployee1);
+      ClusterStartupRule.getCache().getRegion(REGION_NAME).put(key, pdxEmployee1);
       assertTableHasEmployeeData(1, pdxEmployee1, key);
     });
   }
@@ -199,11 +197,11 @@ public class JdbcDUnitTest implements Serializable {
     createMapping(REGION_NAME, CONNECTION_NAME);
     server.invoke(() -> {
       PdxInstance pdxEmployee1 =
-          CacheFactory.getAnyInstance().createPdxInstanceFactory(Employee.class.getName())
+          ClusterStartupRule.getCache().createPdxInstanceFactory(Employee.class.getName())
               .writeString("id", "key1").writeString("name", "Emp1").writeInt("age", 55).create();
 
       String key = "emp1";
-      CacheFactory.getAnyInstance().getRegion(REGION_NAME).put(key, pdxEmployee1);
+      ClusterStartupRule.getCache().getRegion(REGION_NAME).put(key, pdxEmployee1);
       assertTableHasEmployeeData(1, pdxEmployee1, key);
     });
   }
@@ -215,7 +213,7 @@ public class JdbcDUnitTest implements Serializable {
     createMapping(REGION_NAME, CONNECTION_NAME);
     server.invoke(() -> {
       String key = "emp1";
-      Region region = CacheFactory.getAnyInstance().getRegion(REGION_NAME);
+      Region region = ClusterStartupRule.getCache().getRegion(REGION_NAME);
       region.get(key);
       assertThat(region.size()).isEqualTo(0);
     });
@@ -228,11 +226,11 @@ public class JdbcDUnitTest implements Serializable {
     createMapping(REGION_NAME, CONNECTION_NAME);
     server.invoke(() -> {
       PdxInstance pdxEmployee1 =
-          CacheFactory.getAnyInstance().createPdxInstanceFactory(Employee.class.getName())
+          ClusterStartupRule.getCache().createPdxInstanceFactory(Employee.class.getName())
               .writeString("id", "id1").writeString("name", "Emp1").writeInt("age", 55).create();
 
       String key = "id1";
-      Region region = CacheFactory.getAnyInstance().getRegion(REGION_NAME);
+      Region region = ClusterStartupRule.getCache().getRegion(REGION_NAME);
       region.put(key, pdxEmployee1);
       region.invalidate(key);
 
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java
index 7ae41b1..38598a7 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsDUnitTest.java
@@ -65,7 +65,7 @@ public class ExportLogsDUnitTest {
   private static final String ERROR_LOG_PREFIX = "[IGNORE]";
 
   @Rule
-  public ClusterStartupRule lsRule = new ClusterStartupRule().withTempWorkingDir().withLogFile();
+  public ClusterStartupRule lsRule = new ClusterStartupRule().withLogFile();
 
   @Rule
   public GfshCommandRule gfshConnector = new GfshCommandRule();
@@ -253,7 +253,6 @@ public class ExportLogsDUnitTest {
 
     File logFileForMember = new File(dirForMember, memberName + ".log");
     assertThat(logFileForMember).exists();
-    assertThat(fileNamesInDir).hasSize(1);
 
     String logFileContents = FileUtils.readLines(logFileForMember, Charset.defaultCharset())
         .stream().collect(joining("\n"));
@@ -283,7 +282,7 @@ public class ExportLogsDUnitTest {
         .describedAs(filesInDir.stream().map(File::getAbsolutePath).collect(joining(",")))
         .hasSize(1);
 
-    File unzippedLogFileDir = lsRule.getTempWorkingDir().newFolder("unzippedLogs");
+    File unzippedLogFileDir = new File(locatorWorkingDir, "unzippedLogs");
     ZipUtils.unzip(zipFilesInDir.get(0).getCanonicalPath(), unzippedLogFileDir.getCanonicalPath());
     return unzippedLogFileDir;
   }
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java
index dc79bea..8a7d106 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/ClusterStartupRule.java
@@ -104,8 +104,14 @@ public class ClusterStartupRule extends ExternalResource implements Serializable
    *
    * use this if you want to examine each member's file system without worrying about it's being
    * contaminated with DUnitLauncher's log files that exists in each dunit/vm folder such as
-   * locator0View.dat and locator0views.log and other random log files. This will cause the VMs to
-   * be bounced after test is done, because it dynamically changes the user.dir system property.
+   * locatorxxxView.dat and locatorxxxviews.log and other random log files.
+   *
+   * If the product code is doing new File(".") or new File("relative-path.log"), it will still
+   * be pointing to a File under the old CWD. So avoid using relative paths and always use an
+   * absolute path or a parent dir when creating a new File object.
+   *
+   * But this will cause the VMs to be bounced after the test is done, because it dynamically
+   * changes the user.dir system property, causing slow-running tests. Use with discretion.
    */
   public ClusterStartupRule withTempWorkingDir() {
     tempWorkingDir = new SerializableTemporaryFolder();
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java
index 96bede2..c9f1212 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java
@@ -103,10 +103,9 @@ public class MemberVM extends VMProvider implements Member {
        */
       vm.bounce();
     } else
-      // if using the dunit/vm dir as the preset working dir, need to cleanup dir except
-      // the locator0view* file, so that regions/indexes won't get persisted across tests
-      Arrays.stream(getWorkingDir().listFiles((dir, name) -> !name.startsWith("locator0view")))
-          .forEach(FileUtils::deleteQuietly);
+      // if using the dunit/vm dir as the preset working dir, need to cleanup dir
+      // so that regions/indexes won't get persisted across tests
+      Arrays.stream(getWorkingDir().listFiles()).forEach(FileUtils::deleteQuietly);
   }
 
   /**

-- 
To stop receiving notification emails like this one, please contact
['"commits@geode.apache.org" <co...@geode.apache.org>'].