Posted to commits@geode.apache.org by kl...@apache.org on 2017/03/15 20:23:40 UTC

[01/10] geode git commit: GEODE-2653: Add FlakyTest category to test with Thread.sleep [Forced Update!]

Repository: geode
Updated Branches:
  refs/heads/feature/GEODE-2645 a9435d644 -> b48f5a354 (forced update)


GEODE-2653: Add FlakyTest category to test with Thread.sleep


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/f4701a1f
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/f4701a1f
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/f4701a1f

Branch: refs/heads/feature/GEODE-2645
Commit: f4701a1f97df18b394bbf3f3fcd8c23156d95510
Parents: aa922bf
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Mar 13 15:39:43 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Mon Mar 13 15:39:43 2017 -0700

----------------------------------------------------------------------
 .../internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java  | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/f4701a1f/geode-core/src/test/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
index b3076be..05ab6f7 100644
--- a/geode-core/src/test/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/membership/gms/membership/GMSJoinLeaveJUnitTest.java
@@ -55,6 +55,7 @@ import org.apache.geode.distributed.internal.membership.gms.messages.RemoveMembe
 import org.apache.geode.distributed.internal.membership.gms.messages.ViewAckMessage;
 import org.apache.geode.internal.Version;
 import org.apache.geode.security.AuthenticationFailedException;
+import org.apache.geode.test.junit.categories.FlakyTest;
 import org.apache.geode.test.junit.categories.IntegrationTest;
 import org.apache.geode.test.junit.categories.MembershipTest;
 import org.junit.After;
@@ -326,6 +327,7 @@ public class GMSJoinLeaveJUnitTest {
     return memberList;
   }
 
+  @Category(FlakyTest.class) // GEODE-2653: flaky due to Thread.sleep
   @Test
   public void testRemoveMember() throws Exception {
     initMocks();
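
For context on the mechanism: a JUnit 4 category is a plain marker interface, and annotating a single method with @Category tags it without changing how it runs unless the build filters on that category. A minimal self-contained sketch of the pattern used above (class and method names are hypothetical; only the FlakyTest idea mirrors Geode's):

import org.junit.Test;
import org.junit.experimental.categories.Category;

public class ExampleFlakyCategoryTest {

  // A category is nothing but a marker interface; Geode's FlakyTest is defined the same way.
  public interface FlakyTest {
  }

  @Category(FlakyTest.class) // tags only this method; the rest of the class is unaffected
  @Test
  public void timingSensitiveBehavior() throws Exception {
    Thread.sleep(100); // the timing dependence is what earns the FlakyTest tag
    // real assertions would follow here
  }
}

Builds commonly route tests tagged this way into a separate run so they cannot destabilize the main suite; the exact build wiring is project-specific.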


[04/10] geode git commit: GEODE-2643: Combine chunk and file region into a single region

Posted by kl...@apache.org.
GEODE-2643: Combine chunk and file region into a single region

* removed the file and chunk counts from stats
* removed tests that checked against chunk bucketRegions
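
The single region works because the two key types partition the key space: file metadata is keyed by file name (a String) while chunk data is keyed by a ChunkKey, so entries never collide and lookups can discriminate with an instanceof check, exactly as the FileSystem changes below do. A minimal sketch of the idea, with a plain concurrent map standing in for the partitioned region (the class name is hypothetical):

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

public class CombinedRegionSketch {
  // One map holds both kinds of entries, distinguished by key type:
  //   String   -> File metadata
  //   ChunkKey -> byte[] chunk data
  private final Map<Object, Object> fileAndChunkRegion = new ConcurrentHashMap<>();

  // Mirrors the new FileSystem.listFileNames(): only String keys are file names.
  public List<String> listFileNames() {
    return fileAndChunkRegion.keySet().stream()
        .filter(key -> key instanceof String)
        .map(String.class::cast)
        .collect(Collectors.toList());
  }
}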


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/d8a89730
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/d8a89730
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/d8a89730

Branch: refs/heads/feature/GEODE-2645
Commit: d8a897305c1fe9954f2511a6af0c8a633ef152d0
Parents: 3c212fb
Author: Jason Huynh <hu...@gmail.com>
Authored: Mon Mar 13 16:35:56 2017 -0700
Committer: Jason Huynh <hu...@gmail.com>
Committed: Tue Mar 14 11:47:54 2017 -0700

----------------------------------------------------------------------
 .../lucene/internal/IndexRepositoryFactory.java |  28 +++--
 .../LuceneIndexForPartitionedRegion.java        |  76 ++++---------
 .../internal/directory/RegionDirectory.java     |   5 +-
 .../lucene/internal/filesystem/FileSystem.java  |  70 +++++-------
 .../internal/filesystem/FileSystemStats.java    |  20 ----
 .../lucene/LuceneIndexDestroyDUnitTest.java     |   5 -
 .../LuceneIndexMaintenanceIntegrationTest.java  |   2 -
 ...LuceneQueriesPersistenceIntegrationTest.java |   2 -
 .../LuceneIndexForPartitionedRegionTest.java    | 102 ++---------------
 .../LuceneIndexRecoveryHAIntegrationTest.java   |   6 +-
 .../PartitionedRepositoryManagerJUnitTest.java  |  56 ++++------
 .../directory/RegionDirectoryJUnitTest.java     |   3 +-
 .../DistributedScoringJUnitTest.java            |   5 +-
 .../filesystem/FileSystemJUnitTest.java         | 112 ++++---------------
 .../IndexRepositoryImplJUnitTest.java           |   5 +-
 .../IndexRepositoryImplPerformanceTest.java     |  13 +--
 .../cache/lucene/test/LuceneTestUtilities.java  |   3 -
 17 files changed, 131 insertions(+), 382 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java
index 7e685b7..475311d 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java
@@ -43,20 +43,18 @@ public class IndexRepositoryFactory {
       LuceneIndexImpl index, PartitionedRegion userRegion, final IndexRepository oldRepository)
       throws IOException {
     LuceneIndexForPartitionedRegion indexForPR = (LuceneIndexForPartitionedRegion) index;
-    final PartitionedRegion fileRegion = indexForPR.getFileRegion();
-    final PartitionedRegion chunkRegion = indexForPR.getChunkRegion();
+    final PartitionedRegion fileRegion = indexForPR.getFileAndChunkRegion();
 
-    BucketRegion fileBucket = getMatchingBucket(fileRegion, bucketId);
-    BucketRegion chunkBucket = getMatchingBucket(chunkRegion, bucketId);
+    BucketRegion fileAndChunkBucket = getMatchingBucket(fileRegion, bucketId);
     BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId);
     boolean success = false;
-    if (fileBucket == null || chunkBucket == null) {
+    if (fileAndChunkBucket == null) {
       if (oldRepository != null) {
         oldRepository.cleanup();
       }
       return null;
     }
-    if (!chunkBucket.getBucketAdvisor().isPrimary()) {
+    if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
       if (oldRepository != null) {
         oldRepository.cleanup();
       }
@@ -71,26 +69,26 @@ public class IndexRepositoryFactory {
       oldRepository.cleanup();
     }
     DistributedLockService lockService = getLockService();
-    String lockName = getLockName(bucketId, fileBucket);
+    String lockName = getLockName(fileAndChunkBucket);
     while (!lockService.lock(lockName, 100, -1)) {
-      if (!chunkBucket.getBucketAdvisor().isPrimary()) {
+      if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
         return null;
       }
     }
 
     final IndexRepository repo;
     try {
-      RegionDirectory dir = new RegionDirectory(getBucketTargetingMap(fileBucket, bucketId),
-          getBucketTargetingMap(chunkBucket, bucketId), indexForPR.getFileSystemStats());
+      RegionDirectory dir = new RegionDirectory(getBucketTargetingMap(fileAndChunkBucket, bucketId),
+          indexForPR.getFileSystemStats());
       IndexWriterConfig config = new IndexWriterConfig(indexForPR.getAnalyzer());
       IndexWriter writer = new IndexWriter(dir, config);
-      repo = new IndexRepositoryImpl(fileBucket, writer, serializer, indexForPR.getIndexStats(),
-          dataBucket, lockService, lockName);
+      repo = new IndexRepositoryImpl(fileAndChunkBucket, writer, serializer,
+          indexForPR.getIndexStats(), dataBucket, lockService, lockName);
       success = true;
       return repo;
     } catch (IOException e) {
       logger.info("Exception thrown while constructing Lucene Index for bucket:" + bucketId
-          + " for file region:" + fileBucket.getFullPath());
+          + " for file region:" + fileAndChunkBucket.getFullPath());
       throw e;
     } finally {
       if (!success) {
@@ -104,8 +102,8 @@ public class IndexRepositoryFactory {
     return new BucketTargetingMap(region, bucketId);
   }
 
-  private String getLockName(final Integer bucketId, final BucketRegion fileBucket) {
-    return FILE_REGION_LOCK_FOR_BUCKET_ID + fileBucket.getFullPath();
+  private String getLockName(final BucketRegion fileAndChunkBucket) {
+    return FILE_REGION_LOCK_FOR_BUCKET_ID + fileAndChunkBucket.getFullPath();
   }
 
   private DistributedLockService getLockService() {
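
The retry loop above is the subtle part: lockService.lock(lockName, 100, -1) is polled in 100 ms slices, and bucket primacy is re-checked between attempts so a member that is demoted while waiting gives up instead of building a repository for a bucket it no longer owns. A standalone sketch of that shape, with a ReentrantLock and a BooleanSupplier standing in for the DistributedLockService and BucketAdvisor:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BooleanSupplier;

public class PrimaryAwareLockSketch {
  /**
   * Poll for the lock in short slices, re-checking primary status between
   * attempts; returns false if this member stops being primary while waiting.
   */
  static boolean lockWhilePrimary(ReentrantLock lock, BooleanSupplier isPrimary)
      throws InterruptedException {
    while (!lock.tryLock(100, TimeUnit.MILLISECONDS)) {
      if (!isPrimary.getAsBoolean()) {
        return false; // bucket moved on; abandon the repository build
      }
    }
    return true; // lock held; the caller must release it in a finally block
  }
}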

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java
index 7274d6a..80e0c44 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java
@@ -29,7 +29,6 @@ import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueImpl;
 import org.apache.geode.cache.execute.FunctionService;
 import org.apache.geode.cache.execute.ResultCollector;
 import org.apache.geode.cache.lucene.internal.directory.DumpDirectoryFiles;
-import org.apache.geode.cache.lucene.internal.filesystem.ChunkKey;
 import org.apache.geode.cache.lucene.internal.filesystem.File;
 import org.apache.geode.cache.lucene.internal.filesystem.FileSystemStats;
 import org.apache.geode.cache.lucene.internal.partition.BucketTargetingFixedResolver;
@@ -48,12 +47,10 @@ import java.util.Set;
 
 /* wrapper of IndexWriter */
 public class LuceneIndexForPartitionedRegion extends LuceneIndexImpl {
-  protected Region<String, File> fileRegion;
-  protected Region<ChunkKey, byte[]> chunkRegion;
+  protected Region fileAndChunkRegion;
   protected final FileSystemStats fileSystemStats;
 
   public static final String FILES_REGION_SUFFIX = ".files";
-  public static final String CHUNKS_REGION_SUFFIX = ".chunks";
 
   public LuceneIndexForPartitionedRegion(String indexName, String regionPath, Cache cache) {
     super(indexName, regionPath, cache);
@@ -81,16 +78,12 @@ public class LuceneIndexForPartitionedRegion extends LuceneIndexImpl {
       regionShortCut = RegionShortcut.PARTITION;
     }
 
-    // create PR fileRegion, but not to create its buckets for now
+    // create PR fileAndChunkRegion, but not to create its buckets for now
     final String fileRegionName = createFileRegionName();
     PartitionAttributes partitionAttributes = dataRegion.getPartitionAttributes();
-    if (!fileRegionExists(fileRegionName)) {
-      fileRegion =
-          createFileRegion(regionShortCut, fileRegionName, partitionAttributes, regionAttributes);
-    }
+
 
     // create PR chunkRegion, but not to create its buckets for now
-    final String chunkRegionName = createChunkRegionName();
 
     // we will create RegionDirectories on the fly when data comes in
     HeterogeneousLuceneSerializer mapper = new HeterogeneousLuceneSerializer(getFieldNames());
@@ -99,23 +92,20 @@ public class LuceneIndexForPartitionedRegion extends LuceneIndexImpl {
     DM dm = ((GemFireCacheImpl) getCache()).getDistributedSystem().getDistributionManager();
     LuceneBucketListener lucenePrimaryBucketListener =
         new LuceneBucketListener(partitionedRepositoryManager, dm);
-    if (!chunkRegionExists(chunkRegionName)) {
-      chunkRegion = createChunkRegion(regionShortCut, fileRegionName, partitionAttributes,
-          chunkRegionName, regionAttributes, lucenePrimaryBucketListener);
+
+    if (!fileRegionExists(fileRegionName)) {
+      fileAndChunkRegion = createFileRegion(regionShortCut, fileRegionName, partitionAttributes,
+          regionAttributes, lucenePrimaryBucketListener);
     }
-    fileSystemStats.setFileSupplier(() -> (int) getFileRegion().getLocalSize());
-    fileSystemStats.setChunkSupplier(() -> (int) getChunkRegion().getLocalSize());
-    fileSystemStats.setBytesSupplier(() -> getChunkRegion().getPrStats().getDataStoreBytesInUse());
 
-    return partitionedRepositoryManager;
-  }
+    fileSystemStats
+        .setBytesSupplier(() -> getFileAndChunkRegion().getPrStats().getDataStoreBytesInUse());
 
-  public PartitionedRegion getFileRegion() {
-    return (PartitionedRegion) fileRegion;
+    return partitionedRepositoryManager;
   }
 
-  public PartitionedRegion getChunkRegion() {
-    return (PartitionedRegion) chunkRegion;
+  public PartitionedRegion getFileAndChunkRegion() {
+    return (PartitionedRegion) fileAndChunkRegion;
   }
 
   public FileSystemStats getFileSystemStats() {
@@ -123,35 +113,20 @@ public class LuceneIndexForPartitionedRegion extends LuceneIndexImpl {
   }
 
   boolean fileRegionExists(String fileRegionName) {
-    return cache.<String, File>getRegion(fileRegionName) != null;
+    return cache.getRegion(fileRegionName) != null;
   }
 
   Region createFileRegion(final RegionShortcut regionShortCut, final String fileRegionName,
-      final PartitionAttributes partitionAttributes, final RegionAttributes regionAttributes) {
+      final PartitionAttributes partitionAttributes, final RegionAttributes regionAttributes,
+      PartitionListener listener) {
     return createRegion(fileRegionName, regionShortCut, this.regionPath, partitionAttributes,
-        regionAttributes, null);
+        regionAttributes, listener);
   }
 
   public String createFileRegionName() {
     return LuceneServiceImpl.getUniqueIndexRegionName(indexName, regionPath, FILES_REGION_SUFFIX);
   }
 
-  boolean chunkRegionExists(String chunkRegionName) {
-    return cache.<ChunkKey, byte[]>getRegion(chunkRegionName) != null;
-  }
-
-  Region<ChunkKey, byte[]> createChunkRegion(final RegionShortcut regionShortCut,
-      final String fileRegionName, final PartitionAttributes partitionAttributes,
-      final String chunkRegionName, final RegionAttributes regionAttributes,
-      final PartitionListener lucenePrimaryBucketListener) {
-    return createRegion(chunkRegionName, regionShortCut, fileRegionName, partitionAttributes,
-        regionAttributes, lucenePrimaryBucketListener);
-  }
-
-  public String createChunkRegionName() {
-    return LuceneServiceImpl.getUniqueIndexRegionName(indexName, regionPath, CHUNKS_REGION_SUFFIX);
-  }
-
   private PartitionAttributesFactory configureLuceneRegionAttributesFactory(
       PartitionAttributesFactory attributesFactory,
       PartitionAttributes<?, ?> dataRegionAttributes) {
@@ -175,8 +150,7 @@ public class LuceneIndexForPartitionedRegion extends LuceneIndexImpl {
       final RegionShortcut regionShortCut, final String colocatedWithRegionName,
       final PartitionAttributes partitionAttributes, final RegionAttributes regionAttributes,
       PartitionListener lucenePrimaryBucketListener) {
-    PartitionAttributesFactory partitionAttributesFactory =
-        new PartitionAttributesFactory<String, File>();
+    PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
     if (lucenePrimaryBucketListener != null) {
       partitionAttributesFactory.addPartitionListener(lucenePrimaryBucketListener);
     }
@@ -217,23 +191,13 @@ public class LuceneIndexForPartitionedRegion extends LuceneIndexImpl {
     // Invoke super destroy to remove the extension
     super.destroy(initiator);
 
-    // Destroy the chunk region (colocated with the file region)
-    // localDestroyRegion can't be used because locally destroying regions is not supported on
-    // colocated regions
-    if (!chunkRegion.isDestroyed()) {
-      chunkRegion.destroyRegion();
-      if (logger.isDebugEnabled()) {
-        logger.debug("Destroyed chunkRegion=" + chunkRegion.getName());
-      }
-    }
-
     // Destroy the file region (colocated with the application region)
     // localDestroyRegion can't be used because locally destroying regions is not supported on
     // colocated regions
-    if (!fileRegion.isDestroyed()) {
-      fileRegion.destroyRegion();
+    if (!fileAndChunkRegion.isDestroyed()) {
+      fileAndChunkRegion.destroyRegion();
       if (logger.isDebugEnabled()) {
-        logger.debug("Destroyed fileRegion=" + fileRegion.getName());
+        logger.debug("Destroyed fileAndChunkRegion=" + fileAndChunkRegion.getName());
       }
     }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectory.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectory.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectory.java
index 18428ec..9cdf94b 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectory.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectory.java
@@ -47,10 +47,9 @@ public class RegionDirectory extends BaseDirectory {
    * Create a region directory with a given file and chunk region. These regions may be bucket
    * regions or they may be replicated regions.
    */
-  public RegionDirectory(Map<String, File> fileRegion, Map<ChunkKey, byte[]> chunkRegion,
-      FileSystemStats stats) {
+  public RegionDirectory(Map fileAndChunkRegion, FileSystemStats stats) {
     super(new SingleInstanceLockFactory());
-    fs = new FileSystem(fileRegion, chunkRegion, stats);
+    fs = new FileSystem(fileAndChunkRegion, stats);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystem.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystem.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystem.java
index 660816d..164955f 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystem.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystem.java
@@ -4,9 +4,9 @@
  * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance with the License. You may obtain a
  * copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software distributed under the License
  * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
  * or implied. See the License for the specific language governing permissions and limitations under
@@ -15,7 +15,6 @@
 
 package org.apache.geode.cache.lucene.internal.filesystem;
 
-import org.apache.geode.cache.Region;
 import org.apache.geode.internal.logging.LogService;
 import org.apache.logging.log4j.Logger;
 
@@ -23,15 +22,15 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
+import java.util.stream.Collectors;
 
 /**
  * A Filesystem like interface that stores file data in geode regions.
- * 
+ *
  * This filesystem is safe for use with multiple threads if the threads are not modifying the same
  * files. A single file is not safe to modify by multiple threads, even between different members of
  * the distributed system.
- * 
+ *
  * Changes to a file may not be visible to other members of the system until the FileOutputStream is
  * closed.
  *
@@ -39,37 +38,34 @@ import java.util.concurrent.ConcurrentMap;
 public class FileSystem {
   private static final Logger logger = LogService.getLogger();
 
-  private final Map<String, File> fileRegion;
-  private final Map<ChunkKey, byte[]> chunkRegion;
+  private final Map fileAndChunkRegion;
 
   static final int CHUNK_SIZE = 1024 * 1024; // 1 MB
   private final FileSystemStats stats;
 
   /**
-   * Create filesystem that will store data in the two provided regions. The fileRegion contains
-   * metadata about the files, and the chunkRegion contains the actual data. If data from either
-   * region is missing or inconsistent, no guarantees are made about what this class will do, so
-   * it's best if these regions are colocated and in the same disk store to ensure the data remains
-   * together.
-   * 
-   * @param fileRegion the region to store metadata about the files
-   * @param chunkRegion the region to store actual file data.
+   * Create filesystem that will store data in the two provided regions. The fileAndChunkRegion
+   * contains metadata about the files, and the chunkRegion contains the actual data. If data from
+   * either region is missing or inconsistent, no guarantees are made about what this class will do,
+   * so it's best if these regions are colocated and in the same disk store to ensure the data
+   * remains together.
+   *
+   * @param fileAndChunkRegion the region to store metadata about the files
    */
-  public FileSystem(Map<String, File> fileRegion, Map<ChunkKey, byte[]> chunkRegion,
-      FileSystemStats stats) {
-    this.fileRegion = fileRegion;
-    this.chunkRegion = chunkRegion;
+  public FileSystem(Map fileAndChunkRegion, FileSystemStats stats) {
+    this.fileAndChunkRegion = fileAndChunkRegion;
     this.stats = stats;
   }
 
   public Collection<String> listFileNames() {
-    return fileRegion.keySet();
+    return (Collection<String>) fileAndChunkRegion.keySet().stream()
+        .filter(entry -> (entry instanceof String)).collect(Collectors.toList());
   }
 
   public File createFile(final String name) throws IOException {
     // TODO lock region ?
     final File file = new File(this, name);
-    if (null != fileRegion.putIfAbsent(name, file)) {
+    if (null != fileAndChunkRegion.putIfAbsent(name, file)) {
       throw new IOException("File exists.");
     }
     stats.incFileCreates(1);
@@ -79,7 +75,7 @@ public class FileSystem {
 
   public File putIfAbsentFile(String name, File file) throws IOException {
     // TODO lock region ?
-    if (null != fileRegion.putIfAbsent(name, file)) {
+    if (null != fileAndChunkRegion.putIfAbsent(name, file)) {
       throw new IOException("File exists.");
     }
     stats.incFileCreates(1);
@@ -94,7 +90,7 @@ public class FileSystem {
   }
 
   public File getFile(final String name) throws FileNotFoundException {
-    final File file = fileRegion.get(name);
+    final File file = (File) fileAndChunkRegion.get(name);
 
     if (null == file) {
       throw new FileNotFoundException(name);
@@ -111,7 +107,7 @@ public class FileSystem {
     // things crash in the middle of removing this file?
     // Seems like a file will be left with some
     // dangling chunks at the end of the file
-    File file = fileRegion.remove(name);
+    File file = (File) fileAndChunkRegion.remove(name);
     if (file == null) {
       throw new FileNotFoundException(name);
     }
@@ -121,7 +117,7 @@ public class FileSystem {
       final ChunkKey key = new ChunkKey(file.id, 0);
       while (true) {
         // TODO consider mutable ChunkKey
-        if (null == chunkRegion.remove(key)) {
+        if (null == fileAndChunkRegion.remove(key)) {
           // no more chunks
           break;
         }
@@ -133,7 +129,7 @@ public class FileSystem {
 
   public void renameFile(String source, String dest) throws IOException {
 
-    final File sourceFile = fileRegion.get(source);
+    final File sourceFile = (File) fileAndChunkRegion.get(source);
     if (null == sourceFile) {
       throw new FileNotFoundException(source);
     }
@@ -152,7 +148,7 @@ public class FileSystem {
     updateFile(sourceFile);
     putIfAbsentFile(dest, destFile);
 
-    fileRegion.remove(source);
+    fileAndChunkRegion.remove(source);
     stats.incFileRenames(1);
   }
 
@@ -162,14 +158,14 @@ public class FileSystem {
     // The file's metadata indicates that this chunk shouldn't
     // exist. Purge all of the chunks that are larger than the file metadata
     if (id >= file.chunks) {
-      while (chunkRegion.containsKey(key)) {
-        chunkRegion.remove(key);
+      while (fileAndChunkRegion.containsKey(key)) {
+        fileAndChunkRegion.remove(key);
         key.chunkId++;
       }
       return null;
     }
 
-    final byte[] chunk = chunkRegion.get(key);
+    final byte[] chunk = (byte[]) fileAndChunkRegion.get(key);
     if (chunk != null) {
       stats.incReadBytes(chunk.length);
     } else {
@@ -181,21 +177,18 @@ public class FileSystem {
 
   public void putChunk(final File file, final int id, final byte[] chunk) {
     final ChunkKey key = new ChunkKey(file.id, id);
-    chunkRegion.put(key, chunk);
+    fileAndChunkRegion.put(key, chunk);
     stats.incWrittenBytes(chunk.length);
   }
 
   void updateFile(File file) {
-    fileRegion.put(file.getName(), file);
+    fileAndChunkRegion.put(file.getName(), file);
   }
 
-  public Map<String, File> getFileRegion() {
-    return fileRegion;
+  public Map getFileAndChunkRegion() {
+    return fileAndChunkRegion;
   }
 
-  public Map<ChunkKey, byte[]> getChunkRegion() {
-    return chunkRegion;
-  }
 
   /**
    * Export all of the files in the filesystem to the provided directory
@@ -208,7 +201,6 @@ public class FileSystem {
       } catch (FileNotFoundException e) {
         // ignore this, it was concurrently removed
       }
-
     });
   }
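
After this change a FileSystem is constructed from one map plus stats, exactly as the updated FileSystemJUnitTest.setUp() further down does. A hedged usage sketch against the post-change API (the class name is hypothetical and a Mockito mock stands in for real statistics):

import static org.mockito.Mockito.mock;

import java.util.concurrent.ConcurrentHashMap;

import org.apache.geode.cache.lucene.internal.filesystem.File; // Geode's File, not java.io.File
import org.apache.geode.cache.lucene.internal.filesystem.FileSystem;
import org.apache.geode.cache.lucene.internal.filesystem.FileSystemStats;

public class FileSystemUsageSketch {
  public static void main(String[] args) throws Exception {
    // Any single map works outside a cluster; in production this is the
    // combined file-and-chunk bucket or partitioned region.
    FileSystem system = new FileSystem(new ConcurrentHashMap(), mock(FileSystemStats.class));

    File file = system.createFile("example.txt"); // adds a String -> File entry
    system.putChunk(file, 0, new byte[] {1, 2, 3}); // adds a ChunkKey -> byte[] entry to the same map
    System.out.println(system.listFileNames()); // filters the mixed key set down to [example.txt]
  }
}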
 

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemStats.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemStats.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemStats.java
index 85ae6d7..09bb989 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemStats.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemStats.java
@@ -38,8 +38,6 @@ public class FileSystemStats {
   private static final int temporaryFileCreatesId;
   private static final int fileDeletesId;
   private static final int fileRenamesId;
-  private static final int filesId;
-  private static final int chunksId;
   private static final int bytesId;
 
   static {
@@ -63,8 +61,6 @@ public class FileSystemStats {
     temporaryFileCreatesId = statsType.nameToId("temporaryFileCreates");
     fileDeletesId = statsType.nameToId("fileDeletes");
     fileRenamesId = statsType.nameToId("fileRenames");
-    filesId = statsType.nameToId("files");
-    chunksId = statsType.nameToId("chunks");
     bytesId = statsType.nameToId("bytes");
   }
 
@@ -96,22 +92,6 @@ public class FileSystemStats {
     stats.incInt(fileRenamesId, delta);
   }
 
-  public void setFileSupplier(IntSupplier supplier) {
-    stats.setIntSupplier(filesId, supplier);
-  }
-
-  public int getFiles() {
-    return stats.getInt(filesId);
-  }
-
-  public void setChunkSupplier(IntSupplier supplier) {
-    stats.setIntSupplier(chunksId, supplier);
-  }
-
-  public int getChunks() {
-    return stats.getInt(chunksId);
-  }
-
   public void setBytesSupplier(LongSupplier supplier) {
     stats.setLongSupplier(bytesId, supplier);
   }
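
With the files and chunks counters gone, bytes is the one remaining sampled statistic, and it is supplier-backed: the value is pulled at sampling time rather than pushed on every write. A generic sketch of that pattern (the class is hypothetical; only LongSupplier is a real dependency):

import java.util.function.LongSupplier;

public class SupplierBackedGaugeSketch {
  private volatile LongSupplier bytesSupplier = () -> 0L;

  // Mirrors FileSystemStats.setBytesSupplier: register where to read the value from.
  public void setBytesSupplier(LongSupplier supplier) {
    this.bytesSupplier = supplier;
  }

  // The sampler side: evaluated lazily, at sampling time.
  public long sampleBytes() {
    return bytesSupplier.getAsLong();
  }
}

LuceneIndexForPartitionedRegion wires the real supplier to getFileAndChunkRegion().getPrStats().getDataStoreBytesInUse(), as its diff above shows.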

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
index 037c99f..c878fb7 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
@@ -439,11 +439,6 @@ public class LuceneIndexDestroyDUnitTest extends LuceneDUnitTest {
         LuceneIndexForPartitionedRegion.FILES_REGION_SUFFIX);
     assertNull(getCache().getRegion(filesRegionName));
 
-    // Verify the underlying chunks region no longer exists
-    String chunksRegionName = LuceneServiceImpl.getUniqueIndexRegionName(indexName, REGION_NAME,
-        LuceneIndexForPartitionedRegion.CHUNKS_REGION_SUFFIX);
-    assertNull(getCache().getRegion(chunksRegionName));
-
     // Verify the underlying AsyncEventQueue no longer exists
     String aeqId = LuceneServiceImpl.getUniqueIndexName(indexName, REGION_NAME);
     assertNull(getCache().getAsyncEventQueue(aeqId));

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexMaintenanceIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexMaintenanceIntegrationTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexMaintenanceIntegrationTest.java
index 00abb99..f5bd83f 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexMaintenanceIntegrationTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexMaintenanceIntegrationTest.java
@@ -143,8 +143,6 @@ public class LuceneIndexMaintenanceIntegrationTest extends LuceneIntegrationTest
     FileSystemStats fileSystemStats = index.getFileSystemStats();
     LuceneIndexStats indexStats = index.getIndexStats();
     await(() -> assertEquals(4, indexStats.getDocuments()));
-    await(() -> assertTrue(fileSystemStats.getFiles() > 0));
-    await(() -> assertTrue(fileSystemStats.getChunks() > 0));
     await(() -> assertTrue(fileSystemStats.getBytes() > 0));
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneQueriesPersistenceIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneQueriesPersistenceIntegrationTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneQueriesPersistenceIntegrationTest.java
index f7bb5ab..65b250c 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneQueriesPersistenceIntegrationTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneQueriesPersistenceIntegrationTest.java
@@ -88,8 +88,6 @@ public class LuceneQueriesPersistenceIntegrationTest extends LuceneIntegrationTe
 
     PartitionedRegion fileRegion = (PartitionedRegion) cache.getRegion(aeqId + ".files");
     assertNotNull(fileRegion);
-    PartitionedRegion chunkRegion = (PartitionedRegion) cache.getRegion(aeqId + ".chunks");
-    assertNotNull(chunkRegion);
     Assert.assertTrue(0 < userRegion.getDiskRegionStats().getNumOverflowOnDisk());
 
     LuceneQuery<Integer, Type1> query = service.createLuceneQueryFactory().create(INDEX_NAME,

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java
index 93cc0a8..f2bfdd4 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java
@@ -99,33 +99,6 @@ public class LuceneIndexForPartitionedRegionTest {
   }
 
   @Test
-  public void chunkRegionExistsWhenChunkRegionExistsShouldReturnTrue() {
-    String name = "indexName";
-    String regionPath = "regionName";
-    Cache cache = Fakes.cache();
-    PartitionedRegion region = mock(PartitionedRegion.class);
-    LuceneIndexForPartitionedRegion index =
-        new LuceneIndexForPartitionedRegion(name, regionPath, cache);
-    String chunkRegionName = index.createChunkRegionName();
-    when(cache.getRegion(chunkRegionName)).thenReturn(region);
-
-    assertTrue(index.chunkRegionExists(chunkRegionName));
-  }
-
-  @Test
-  public void chunkRegionExistsWhenChunkRegionDoesNotExistShouldReturnFalse() {
-    String name = "indexName";
-    String regionPath = "regionName";
-    Cache cache = Fakes.cache();
-    LuceneIndexForPartitionedRegion index =
-        new LuceneIndexForPartitionedRegion(name, regionPath, cache);
-    String chunkRegionName = index.createChunkRegionName();
-    when(cache.getRegion(chunkRegionName)).thenReturn(null);
-
-    assertFalse(index.chunkRegionExists(chunkRegionName));
-  }
-
-  @Test
   public void createAEQWithPersistenceCallsCreateOnAEQFactory() {
     String name = "indexName";
     String regionPath = "regionName";
@@ -244,30 +217,13 @@ public class LuceneIndexForPartitionedRegionTest {
       final LuceneIndexForPartitionedRegion index) {
     index.setSearchableFields(new String[] {"field"});
     LuceneIndexForPartitionedRegion spy = spy(index);
-    doReturn(null).when(spy).createFileRegion(any(), any(), any(), any());
-    doReturn(null).when(spy).createChunkRegion(any(), any(), any(), any(), any(), any());
+    doReturn(null).when(spy).createFileRegion(any(), any(), any(), any(), any());
     doReturn(null).when(spy).createAEQ(eq(region));
     spy.initialize();
     return spy;
   }
 
   @Test
-  public void initializeShouldCreatePartitionChunkRegion() {
-    boolean withPersistence = false;
-    String name = "indexName";
-    String regionPath = "regionName";
-    Cache cache = Fakes.cache();
-    Region region = initializeScenario(withPersistence, regionPath, cache);
-
-    LuceneIndexForPartitionedRegion index =
-        new LuceneIndexForPartitionedRegion(name, regionPath, cache);
-    LuceneIndexForPartitionedRegion spy = setupSpy(region, index);
-
-    verify(spy).createChunkRegion(eq(RegionShortcut.PARTITION), eq(index.createFileRegionName()),
-        any(), eq(index.createChunkRegionName()), any(), any());
-  }
-
-  @Test
   public void initializeShouldCreatePartitionFileRegion() {
     boolean withPersistence = false;
     String name = "indexName";
@@ -280,7 +236,7 @@ public class LuceneIndexForPartitionedRegionTest {
     LuceneIndexForPartitionedRegion spy = setupSpy(region, index);
 
     verify(spy).createFileRegion(eq(RegionShortcut.PARTITION), eq(index.createFileRegionName()),
-        any(), any());
+        any(), any(), any());
   }
 
   @Test
@@ -296,7 +252,7 @@ public class LuceneIndexForPartitionedRegionTest {
         new LuceneIndexForPartitionedRegion(name, regionPath, cache);
     LuceneIndexForPartitionedRegion indexSpy = spy(index);
     indexSpy.createFileRegion(RegionShortcut.PARTITION, index.createFileRegionName(),
-        partitionAttributes, regionAttributes);
+        partitionAttributes, regionAttributes, null);
     String fileRegionName = index.createFileRegionName();
     verify(indexSpy).createRegion(fileRegionName, RegionShortcut.PARTITION, regionPath,
         partitionAttributes, regionAttributes, null);
@@ -304,48 +260,6 @@ public class LuceneIndexForPartitionedRegionTest {
   }
 
   @Test
-  public void createChunkRegionWithPartitionShortcutCreatesRegionUsingCreateVMRegion()
-      throws Exception {
-    String name = "indexName";
-    String regionPath = "regionName";
-    GemFireCacheImpl cache = Fakes.cache();
-    PartitionAttributes partitionAttributes = initializeAttributes(cache);
-    RegionAttributes regionAttributes = mock(RegionAttributes.class);
-    when(regionAttributes.getDataPolicy()).thenReturn(DataPolicy.PARTITION);
-    LuceneIndexForPartitionedRegion index =
-        new LuceneIndexForPartitionedRegion(name, regionPath, cache);
-    LuceneIndexForPartitionedRegion indexSpy = spy(index);
-    String chunkRegionName = index.createChunkRegionName();
-    String fileRegionName = index.createFileRegionName();
-    indexSpy.createChunkRegion(RegionShortcut.PARTITION, fileRegionName, partitionAttributes,
-        chunkRegionName, regionAttributes, null);
-    verify(indexSpy).createRegion(chunkRegionName, RegionShortcut.PARTITION, fileRegionName,
-        partitionAttributes, regionAttributes, null);
-    verify(cache).createVMRegion(eq(chunkRegionName), any(), any());
-  }
-
-  @Test
-  public void initializeShouldCreatePartitionPersistentChunkRegion() {
-    boolean withPersistence = true;
-    String name = "indexName";
-    String regionPath = "regionName";
-    Cache cache = Fakes.cache();
-    initializeScenario(withPersistence, regionPath, cache);
-
-    LuceneIndexForPartitionedRegion index =
-        new LuceneIndexForPartitionedRegion(name, regionPath, cache);
-    index.setSearchableFields(new String[] {"field"});
-    LuceneIndexForPartitionedRegion spy = spy(index);
-    doReturn(null).when(spy).createFileRegion(any(), any(), any(), any());
-    doReturn(null).when(spy).createChunkRegion(any(), any(), any(), any(), any(), any());
-    doReturn(null).when(spy).createAEQ(any());
-    spy.initialize();
-
-    verify(spy).createChunkRegion(eq(RegionShortcut.PARTITION_PERSISTENT),
-        eq(index.createFileRegionName()), any(), eq(index.createChunkRegionName()), any(), any());
-  }
-
-  @Test
   public void initializeShouldCreatePartitionPersistentFileRegion() {
     boolean withPersistence = true;
     String name = "indexName";
@@ -357,13 +271,12 @@ public class LuceneIndexForPartitionedRegionTest {
         new LuceneIndexForPartitionedRegion(name, regionPath, cache);
     index.setSearchableFields(new String[] {"field"});
     LuceneIndexForPartitionedRegion spy = spy(index);
-    doReturn(null).when(spy).createFileRegion(any(), any(), any(), any());
-    doReturn(null).when(spy).createChunkRegion(any(), any(), any(), any(), any(), any());
+    doReturn(null).when(spy).createFileRegion(any(), any(), any(), any(), any());
     doReturn(null).when(spy).createAEQ(any());
     spy.initialize();
 
     verify(spy).createFileRegion(eq(RegionShortcut.PARTITION_PERSISTENT),
-        eq(index.createFileRegionName()), any(), any());
+        eq(index.createFileRegionName()), any(), any(), any());
   }
 
   @Test
@@ -378,14 +291,13 @@ public class LuceneIndexForPartitionedRegionTest {
         new LuceneIndexForPartitionedRegion(name, regionPath, cache);
     index.setSearchableFields(new String[] {"field"});
     LuceneIndexForPartitionedRegion spy = spy(index);
-    doReturn(null).when(spy).createFileRegion(any(), any(), any(), any());
-    doReturn(null).when(spy).createChunkRegion(any(), any(), any(), any(), any(), any());
+    doReturn(null).when(spy).createFileRegion(any(), any(), any(), any(), any());
     doReturn(null).when(spy).createAEQ(any());
     spy.initialize();
     spy.initialize();
 
     verify(spy).createFileRegion(eq(RegionShortcut.PARTITION_PERSISTENT),
-        eq(index.createFileRegionName()), any(), any());
+        eq(index.createFileRegionName()), any(), any(), any());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java
index f853baf..b6b1d23 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexRecoveryHAIntegrationTest.java
@@ -69,8 +69,8 @@ public class LuceneIndexRecoveryHAIntegrationTest {
   }
 
   /**
-   * On rebalance, new repository manager will be created. It will try to read fileRegion and
-   * construct index. This test simulates the same.
+   * On rebalance, new repository manager will be created. It will try to read fileAndChunkRegion
+   * and construct index. This test simulates the same.
    */
   // @Test
   public void recoverRepoInANewNode()
@@ -99,7 +99,7 @@ public class LuceneIndexRecoveryHAIntegrationTest {
 
     // close the region to simulate bucket movement. New node will create repo using data persisted
     // by old region
-    // ((PartitionedRegion)index.fileRegion).close();
+    // ((PartitionedRegion)index.fileAndChunkRegion).close();
     // ((PartitionedRegion)index.chunkRegion).close();
     userRegion.close();
 

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManagerJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManagerJUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManagerJUnitTest.java
index 9c603c7..87317cc 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManagerJUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManagerJUnitTest.java
@@ -29,7 +29,6 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.geode.cache.lucene.internal.partition.BucketTargetingMap;
-import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.internal.locks.DLockService;
 import org.apache.geode.internal.cache.BucketAdvisor;
 import org.apache.geode.internal.cache.PartitionedRegionHelper;
@@ -63,15 +62,12 @@ import org.apache.geode.test.junit.categories.UnitTest;
 public class PartitionedRepositoryManagerJUnitTest {
 
   protected PartitionedRegion userRegion;
-  protected PartitionedRegion fileRegion;
-  protected PartitionedRegion chunkRegion;
+  protected PartitionedRegion fileAndChunkRegion;
   protected LuceneSerializer serializer;
   protected PartitionedRegionDataStore userDataStore;
   protected PartitionedRegionDataStore fileDataStore;
-  protected PartitionedRegionDataStore chunkDataStore;
 
-  protected Map<Integer, BucketRegion> fileBuckets = new HashMap<Integer, BucketRegion>();
-  protected Map<Integer, BucketRegion> chunkBuckets = new HashMap<Integer, BucketRegion>();
+  protected Map<Integer, BucketRegion> fileAndChunkBuckets = new HashMap<Integer, BucketRegion>();
   protected Map<Integer, BucketRegion> dataBuckets = new HashMap<Integer, BucketRegion>();
   protected LuceneIndexStats indexStats;
   protected FileSystemStats fileSystemStats;
@@ -100,20 +96,16 @@ public class PartitionedRepositoryManagerJUnitTest {
   }
 
   protected void createIndexAndRepoManager() {
-    fileRegion = Mockito.mock(PartitionedRegion.class);
+    fileAndChunkRegion = Mockito.mock(PartitionedRegion.class);
     fileDataStore = Mockito.mock(PartitionedRegionDataStore.class);
-    when(fileRegion.getDataStore()).thenReturn(fileDataStore);
-    when(fileRegion.getTotalNumberOfBuckets()).thenReturn(113);
-    when(fileRegion.getFullPath()).thenReturn("FileRegion");
-    chunkRegion = Mockito.mock(PartitionedRegion.class);
-    chunkDataStore = Mockito.mock(PartitionedRegionDataStore.class);
-    when(chunkRegion.getFullPath()).thenReturn("ChunkRegion");
-    when(chunkRegion.getDataStore()).thenReturn(chunkDataStore);
+    when(fileAndChunkRegion.getDataStore()).thenReturn(fileDataStore);
+    when(fileAndChunkRegion.getTotalNumberOfBuckets()).thenReturn(113);
+    when(fileAndChunkRegion.getFullPath()).thenReturn("FileRegion");
     indexStats = Mockito.mock(LuceneIndexStats.class);
     fileSystemStats = Mockito.mock(FileSystemStats.class);
     indexForPR = Mockito.mock(LuceneIndexForPartitionedRegion.class);
-    when(((LuceneIndexForPartitionedRegion) indexForPR).getFileRegion()).thenReturn(fileRegion);
-    when(((LuceneIndexForPartitionedRegion) indexForPR).getChunkRegion()).thenReturn(chunkRegion);
+    when(((LuceneIndexForPartitionedRegion) indexForPR).getFileAndChunkRegion())
+        .thenReturn(fileAndChunkRegion);
     when(((LuceneIndexForPartitionedRegion) indexForPR).getFileSystemStats())
         .thenReturn(fileSystemStats);
     when(indexForPR.getIndexStats()).thenReturn(indexStats);
@@ -159,7 +151,7 @@ public class PartitionedRepositoryManagerJUnitTest {
     assertNotNull(repo0);
     checkRepository(repo0, 0);
 
-    BucketRegion fileBucket0 = fileBuckets.get(0);
+    BucketRegion fileBucket0 = fileAndChunkBuckets.get(0);
     BucketRegion dataBucket0 = dataBuckets.get(0);
 
     // Simulate rebalancing of a bucket by marking the old bucket is destroyed
@@ -189,11 +181,11 @@ public class PartitionedRepositoryManagerJUnitTest {
 
     when(fileDataStore.getLocalBucketById(eq(0))).thenReturn(null);
 
-    when(fileRegion.getOrCreateNodeForBucketWrite(eq(0), (RetryTimeKeeper) any()))
+    when(fileAndChunkRegion.getOrCreateNodeForBucketWrite(eq(0), (RetryTimeKeeper) any()))
         .then(new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
-            when(fileDataStore.getLocalBucketById(eq(0))).thenReturn(fileBuckets.get(0));
+            when(fileDataStore.getLocalBucketById(eq(0))).thenReturn(fileAndChunkBuckets.get(0));
             return null;
           }
         });
@@ -241,36 +233,30 @@ public class PartitionedRepositoryManagerJUnitTest {
   protected void checkRepository(IndexRepositoryImpl repo0, int bucketId) {
     IndexWriter writer0 = repo0.getWriter();
     RegionDirectory dir0 = (RegionDirectory) writer0.getDirectory();
-    assertEquals(new BucketTargetingMap(fileBuckets.get(bucketId), bucketId),
-        dir0.getFileSystem().getFileRegion());
-    assertEquals(new BucketTargetingMap(chunkBuckets.get(bucketId), bucketId),
-        dir0.getFileSystem().getChunkRegion());
+    assertEquals(new BucketTargetingMap(fileAndChunkBuckets.get(bucketId), bucketId),
+        dir0.getFileSystem().getFileAndChunkRegion());
     assertEquals(serializer, repo0.getSerializer());
   }
 
   protected BucketRegion setUpMockBucket(int id) throws BucketNotFoundException {
     BucketRegion mockBucket = Mockito.mock(BucketRegion.class);
-    BucketRegion fileBucket = Mockito.mock(BucketRegion.class);
-    // Allowing the fileBucket to behave like a map so that the IndexWriter operations don't fail
-    Fakes.addMapBehavior(fileBucket);
-    when(fileBucket.getFullPath()).thenReturn("File" + id);
-    BucketRegion chunkBucket = Mockito.mock(BucketRegion.class);
-    when(chunkBucket.getFullPath()).thenReturn("Chunk" + id);
+    BucketRegion fileAndChunkBucket = Mockito.mock(BucketRegion.class);
+    // Allowing the fileAndChunkBucket to behave like a map so that the IndexWriter operations don't
+    // fail
+    Fakes.addMapBehavior(fileAndChunkBucket);
+    when(fileAndChunkBucket.getFullPath()).thenReturn("File" + id);
     when(mockBucket.getId()).thenReturn(id);
     when(userRegion.getBucketRegion(eq(id), eq(null))).thenReturn(mockBucket);
     when(userDataStore.getLocalBucketById(eq(id))).thenReturn(mockBucket);
     when(userRegion.getBucketRegion(eq(id + 113), eq(null))).thenReturn(mockBucket);
     when(userDataStore.getLocalBucketById(eq(id + 113))).thenReturn(mockBucket);
-    when(fileDataStore.getLocalBucketById(eq(id))).thenReturn(fileBucket);
-    when(chunkDataStore.getLocalBucketById(eq(id))).thenReturn(chunkBucket);
+    when(fileDataStore.getLocalBucketById(eq(id))).thenReturn(fileAndChunkBucket);
 
-    fileBuckets.put(id, fileBucket);
-    chunkBuckets.put(id, chunkBucket);
+    fileAndChunkBuckets.put(id, fileAndChunkBucket);
     dataBuckets.put(id, mockBucket);
 
     BucketAdvisor mockBucketAdvisor = Mockito.mock(BucketAdvisor.class);
-    when(fileBucket.getBucketAdvisor()).thenReturn(mockBucketAdvisor);
-    when(chunkBucket.getBucketAdvisor()).thenReturn(mockBucketAdvisor);
+    when(fileAndChunkBucket.getBucketAdvisor()).thenReturn(mockBucketAdvisor);
     when(mockBucketAdvisor.isPrimary()).thenReturn(true);
     return mockBucket;
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectoryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectoryJUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectoryJUnitTest.java
index 32249e4..3f468f4 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectoryJUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/directory/RegionDirectoryJUnitTest.java
@@ -58,7 +58,6 @@ public class RegionDirectoryJUnitTest extends BaseDirectoryTestCase {
     // test asserts that no system properties have changed. Unfortunately, there is no
     // way to control the order of rules, so we can't clear this property with a rule
     // or @After method. Instead, do it in the close method of the directory.
-    return new RegionDirectory(new ConcurrentHashMap<String, File>(),
-        new ConcurrentHashMap<ChunkKey, byte[]>(), stats);
+    return new RegionDirectory(new ConcurrentHashMap(), stats);
   }
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/distributed/DistributedScoringJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/distributed/DistributedScoringJUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/distributed/DistributedScoringJUnitTest.java
index 6062904..41d90ef 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/distributed/DistributedScoringJUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/distributed/DistributedScoringJUnitTest.java
@@ -132,9 +132,8 @@ public class DistributedScoringJUnitTest {
   }
 
   private IndexRepositoryImpl createIndexRepo() throws IOException {
-    ConcurrentHashMap<String, File> fileRegion = new ConcurrentHashMap<String, File>();
-    ConcurrentHashMap<ChunkKey, byte[]> chunkRegion = new ConcurrentHashMap<ChunkKey, byte[]>();
-    RegionDirectory dir = new RegionDirectory(fileRegion, chunkRegion, fileSystemStats);
+    ConcurrentHashMap fileAndChunkRegion = new ConcurrentHashMap();
+    RegionDirectory dir = new RegionDirectory(fileAndChunkRegion, fileSystemStats);
 
     IndexWriterConfig config = new IndexWriterConfig(analyzer);
     IndexWriter writer = new IndexWriter(dir, config);

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemJUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
index ee41e40..2008458 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
@@ -23,9 +23,11 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.lang.reflect.Method;
 import java.nio.file.Files;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.ConcurrentHashMap;
 
@@ -35,7 +37,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
-import org.mockito.Spy;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -51,8 +52,7 @@ public class FileSystemJUnitTest {
 
   private FileSystem system;
   private Random rand = new Random();
-  private ConcurrentHashMap<String, File> fileRegion;
-  private ConcurrentHashMap<ChunkKey, byte[]> chunkRegion;
+  private ConcurrentHashMap fileAndChunkRegion;
 
   @Rule
   public DiskDirRule dirRule = new DiskDirRule();
@@ -60,10 +60,9 @@ public class FileSystemJUnitTest {
 
   @Before
   public void setUp() {
-    fileRegion = new ConcurrentHashMap<String, File>();
-    chunkRegion = new ConcurrentHashMap<ChunkKey, byte[]>();
+    fileAndChunkRegion = new ConcurrentHashMap();
     fileSystemStats = mock(FileSystemStats.class);
-    system = new FileSystem(fileRegion, chunkRegion, fileSystemStats);
+    system = new FileSystem(fileAndChunkRegion, fileSystemStats);
   }
 
   /**
@@ -201,7 +200,7 @@ public class FileSystemJUnitTest {
 
     // Assert that there are only 2 chunks in the system, since we wrote just
     // past the end of the first chunk.
-    assertEquals(2, chunkRegion.size());
+    assertEquals(2, numberOfChunks(fileAndChunkRegion));
 
     SeekableInputStream in = file.getInputStream();
 
@@ -254,7 +253,7 @@ public class FileSystemJUnitTest {
     file1 = system.getFile(name1);
     file2 = system.getFile(name2);
 
-    assertEquals(new HashSet(Arrays.asList(name1, name2)), system.listFileNames());
+    assertEquals(Arrays.asList(name1, name2), system.listFileNames());
     assertContents(file1Data, file1);
     assertContents(file2Data, file2);
 
@@ -265,7 +264,7 @@ public class FileSystemJUnitTest {
     } catch (IOException expected) {
 
     }
-    assertEquals(new HashSet(Arrays.asList(name1, name2)), system.listFileNames());
+    assertEquals(Arrays.asList(name1, name2), system.listFileNames());
     assertContents(file1Data, file1);
     assertContents(file2Data, file2);
 
@@ -275,17 +274,17 @@ public class FileSystemJUnitTest {
 
     File file3 = system.getFile(name3);
 
-    assertEquals(new HashSet(Arrays.asList(name3, name2)), system.listFileNames());
+    assertEquals(Arrays.asList(name3, name2), system.listFileNames());
     assertContents(file1Data, file3);
     assertContents(file2Data, file2);
 
     system.deleteFile(name2);
 
-    assertEquals(new HashSet(Arrays.asList(name3)), system.listFileNames());
+    assertEquals(Arrays.asList(name3), system.listFileNames());
 
     system.renameFile(name3, name2);
 
-    assertEquals(new HashSet(Arrays.asList(name2)), system.listFileNames());
+    assertEquals(Arrays.asList(name2), system.listFileNames());
 
     file2 = system.getFile(name2);
     assertContents(file1Data, file2);
@@ -313,7 +312,7 @@ public class FileSystemJUnitTest {
     byte[] bytes = getRandomBytes(size);
     file1.getOutputStream().write(bytes);
 
-    FileSystem system2 = new FileSystem(fileRegion, chunkRegion, fileSystemStats);
+    FileSystem system2 = new FileSystem(fileAndChunkRegion, fileSystemStats);
     File file = system2.getFile(name1);
 
     assertTrue(file.getLength() <= bytes.length);
@@ -325,7 +324,7 @@ public class FileSystemJUnitTest {
 
     if (length == 0) {
       assertEquals(-1, file.getInputStream().read(results));
-      assertTrue(chunkRegion.isEmpty());
+      assertEquals(0, numberOfChunks(fileAndChunkRegion));
     } else {
       // Make sure the amount of data we can read matches the length
       assertEquals(length, file.getInputStream().read(results));
@@ -349,12 +348,11 @@ public class FileSystemJUnitTest {
     // Create a couple of mock regions where we count the operations
     // that happen to them. We will then use this to abort the rename
     // in the middle.
-    ConcurrentHashMap<String, File> spyFileRegion =
-        mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, fileRegion));
-    ConcurrentHashMap<ChunkKey, byte[]> spyChunkRegion =
-        mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, chunkRegion));
+    ConcurrentHashMap spyFileAndChunkRegion =
+        mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, fileAndChunkRegion));
 
-    system = new FileSystem(spyFileRegion, spyChunkRegion, fileSystemStats);
+
+    system = new FileSystem(spyFileAndChunkRegion, fileSystemStats);
 
     String name = "file";
     File file = system.createFile(name);
@@ -394,7 +392,7 @@ public class FileSystemJUnitTest {
 
     }
 
-    system = new FileSystem(fileRegion, chunkRegion, fileSystemStats);
+    system = new FileSystem(fileAndChunkRegion, fileSystemStats);
 
     // This is not the ideal behavior. We are left
     // with two duplicate files. However, we will still
@@ -409,72 +407,6 @@ public class FileSystemJUnitTest {
     assertContents(expectedBytes, destFile);
   }
 
-  /**
-   * Test what happens a file delete is aborted in the middle due to the a cache closed exception.
-   * The next member that uses those files should be able to clean up after the partial rename.
-   */
-  @Test
-  public void testPartialDelete() throws Exception {
-
-    final CountOperations countOperations = new CountOperations();
-    // Create a couple of mock regions where we count the operations
-    // that happen to them. We will then use this to abort the rename
-    // in the middle.
-    ConcurrentHashMap<String, File> spyFileRegion =
-        mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, fileRegion));
-    ConcurrentHashMap<ChunkKey, byte[]> spyChunkRegion =
-        mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, chunkRegion));
-
-    system = new FileSystem(spyFileRegion, spyChunkRegion, fileSystemStats);
-
-    String name1 = "file1";
-    String name2 = "file2";
-    File file1 = system.createFile(name1);
-    File file2 = system.createFile(name2);
-
-    ByteArrayOutputStream expected = new ByteArrayOutputStream();
-
-    // Make sure the file has a lot of chunks
-    for (int i = 0; i < 10; i++) {
-      byte[] bytes = writeRandomBytes(file1);
-      writeBytes(file2, bytes);
-      expected.write(bytes);
-    }
-
-    countOperations.reset();
-
-    system.deleteFile(name1);
-
-    assertTrue(2 <= countOperations.count);
-
-    countOperations.after(countOperations.count / 2, new Runnable() {
-
-      @Override
-      public void run() {
-        throw new CacheClosedException();
-      }
-    });
-    countOperations.reset();
-
-    try {
-      system.deleteFile(name2);
-      fail("should have seen an error");
-    } catch (CacheClosedException expectedException) {
-    }
-
-    system = new FileSystem(fileRegion, chunkRegion, fileSystemStats);
-
-    if (system.listFileNames().size() == 0) {
-      // File was deleted, but shouldn't have any dangling chunks at this point
-      assertEquals(Collections.EMPTY_SET, fileRegion.keySet());
-      // TODO - need to purge chunks of deleted files somehow.
-      // assertIndexDetailsEquals(Collections.EMPTY_SET, chunkRegion.keySet());
-    } else {
-      file2 = system.getFile(name2);
-      assertContents(expected.toByteArray(), file2);
-    }
-  }
-
   @Test
   public void testExport() throws IOException {
     String name1 = "testFile1";
@@ -544,8 +476,8 @@ public class FileSystemJUnitTest {
 
   @Test
   public void testDeletePossiblyRenamedFileDoesNotDestroyChunks() throws Exception {
-    ConcurrentHashMap<String, File> spyFileRegion = Mockito.spy(fileRegion);
-    system = new FileSystem(spyFileRegion, chunkRegion, fileSystemStats);
+    ConcurrentHashMap spyFileRegion = Mockito.spy(fileAndChunkRegion);
+    system = new FileSystem(spyFileRegion, fileSystemStats);
 
     String sourceFileName = "sourceFile";
     File file1 = system.createFile(sourceFileName);
@@ -610,6 +542,10 @@ public class FileSystemJUnitTest {
     return data;
   }
 
+  private long numberOfChunks(Map map) {
+    return map.keySet().parallelStream().filter(k -> (k instanceof ChunkKey)).count();
+  }
+
   /**
    * A wrapper around an object that will also invoke a callback before applying an operation.
    *

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplJUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplJUnitTest.java
index 42cc2bc..78fc657 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplJUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplJUnitTest.java
@@ -69,10 +69,9 @@ public class IndexRepositoryImplJUnitTest {
 
   @Before
   public void setUp() throws IOException {
-    ConcurrentHashMap<String, File> fileRegion = new ConcurrentHashMap<String, File>();
-    ConcurrentHashMap<ChunkKey, byte[]> chunkRegion = new ConcurrentHashMap<ChunkKey, byte[]>();
+    ConcurrentHashMap fileAndChunkRegion = new ConcurrentHashMap();
     fileSystemStats = mock(FileSystemStats.class);
-    RegionDirectory dir = new RegionDirectory(fileRegion, chunkRegion, fileSystemStats);
+    RegionDirectory dir = new RegionDirectory(fileAndChunkRegion, fileSystemStats);
     IndexWriterConfig config = new IndexWriterConfig(analyzer);
     writer = new IndexWriter(dir, config);
     String[] indexedFields = new String[] {"s", "i", "l", "d", "f", "s2", "missing"};

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplPerformanceTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplPerformanceTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplPerformanceTest.java
index 9067376..46425b9 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplPerformanceTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/repository/IndexRepositoryImplPerformanceTest.java
@@ -93,12 +93,10 @@ public class IndexRepositoryImplPerformanceTest {
       @Override
       public void init() throws Exception {
         cache = new CacheFactory().set(MCAST_PORT, "0").set(LOG_LEVEL, "error").create();
-        Region<String, File> fileRegion =
-            cache.<String, File>createRegionFactory(RegionShortcut.REPLICATE).create("files");
-        Region<ChunkKey, byte[]> chunkRegion =
-            cache.<ChunkKey, byte[]>createRegionFactory(RegionShortcut.REPLICATE).create("chunks");
+        Region fileAndChunkRegion =
+            cache.createRegionFactory(RegionShortcut.REPLICATE).create("files");
 
-        RegionDirectory dir = new RegionDirectory(fileRegion, chunkRegion,
+        RegionDirectory dir = new RegionDirectory(fileAndChunkRegion,
             new FileSystemStats(cache.getDistributedSystem(), "region-index"));
         final LuceneIndexStats stats =
             new LuceneIndexStats(cache.getDistributedSystem(), "region-index");
@@ -108,7 +106,7 @@ public class IndexRepositoryImplPerformanceTest {
         writer = new IndexWriter(dir, config);
         String[] indexedFields = new String[] {"text"};
         HeterogeneousLuceneSerializer mapper = new HeterogeneousLuceneSerializer(indexedFields);
-        repo = new IndexRepositoryImpl(fileRegion, writer, mapper, stats, null);
+        repo = new IndexRepositoryImpl(fileAndChunkRegion, writer, mapper, stats, null);
       }
 
       @Override
@@ -213,8 +211,7 @@ public class IndexRepositoryImplPerformanceTest {
       public void init() throws Exception {
         cache = new CacheFactory().set(MCAST_PORT, "0").set(LOG_LEVEL, "warning").create();
         final FileSystemStats stats = new FileSystemStats(cache.getDistributedSystem(), "stats");
-        RegionDirectory dir = new RegionDirectory(new ConcurrentHashMap<String, File>(),
-            new ConcurrentHashMap<ChunkKey, byte[]>(), stats);
+        RegionDirectory dir = new RegionDirectory(new ConcurrentHashMap(), stats);
         IndexWriterConfig config = new IndexWriterConfig(analyzer);
         writer = new IndexWriter(dir, config);
         searcherManager = new SearcherManager(writer, true, true, null);

http://git-wip-us.apache.org/repos/asf/geode/blob/d8a89730/geode-lucene/src/test/java/org/apache/geode/cache/lucene/test/LuceneTestUtilities.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/test/LuceneTestUtilities.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/test/LuceneTestUtilities.java
index e2d3abc..670e6a8 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/test/LuceneTestUtilities.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/test/LuceneTestUtilities.java
@@ -168,10 +168,7 @@ public class LuceneTestUtilities {
     LuceneIndexForPartitionedRegion index =
         (LuceneIndexForPartitionedRegion) luceneService.getIndex(INDEX_NAME, REGION_NAME);
 
-    // Verify the meta regions exist and are internal
-    LocalRegion chunkRegion = (LocalRegion) cache.getRegion(index.createChunkRegionName());
     LocalRegion fileRegion = (LocalRegion) cache.getRegion(index.createFileRegionName());
-    verify.accept(chunkRegion);
     verify.accept(fileRegion);
   }
 


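The numberOfChunks() helper above is the key consequence of merging the two regions: once file entries and chunk entries live in a single map, chunk bookkeeping becomes a filter on key type. A minimal self-contained sketch of that pattern (the ChunkKey fields and the byte[] values here are placeholders for illustration, not the real Geode classes):

    import java.util.concurrent.ConcurrentHashMap;

    public class CombinedRegionSketch {
      // Placeholder for Geode's real ChunkKey; only the key's type matters here.
      static final class ChunkKey {
        final String fileName;
        final int chunkId;
        ChunkKey(String fileName, int chunkId) {
          this.fileName = fileName;
          this.chunkId = chunkId;
        }
      }

      public static void main(String[] args) {
        // One raw map stands in for the combined file-and-chunk region.
        ConcurrentHashMap<Object, Object> fileAndChunkRegion = new ConcurrentHashMap<>();
        fileAndChunkRegion.put("file1", new byte[0]);                     // file entry, keyed by name
        fileAndChunkRegion.put(new ChunkKey("file1", 0), new byte[512]);  // chunk entry

        // Counting chunks is now a key-type filter, mirroring numberOfChunks().
        long chunks = fileAndChunkRegion.keySet().stream()
            .filter(key -> key instanceof ChunkKey)
            .count();
        System.out.println("chunks = " + chunks); // prints 1
      }
    }
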
[10/10] geode git commit: GEODE-2645: rewrite test to fix flakiness and improve readability

Posted by kl...@apache.org.
GEODE-2645: rewrite test to fix flakiness and improve readability


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/b48f5a35
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/b48f5a35
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/b48f5a35

Branch: refs/heads/feature/GEODE-2645
Commit: b48f5a35450c1f9b0a2e16f9ac278efd9d9801a7
Parents: f329f4a
Author: Kirk Lund <kl...@apache.org>
Authored: Mon Mar 13 14:51:36 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed Mar 15 13:10:59 2017 -0700

----------------------------------------------------------------------
 .../geode/cache30/CacheLogRollDUnitTest.java    | 500 -------------------
 .../logging/CacheLogRollingIntegrationTest.java | 349 +++++++++++++
 2 files changed, 349 insertions(+), 500 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/b48f5a35/geode-core/src/test/java/org/apache/geode/cache30/CacheLogRollDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/CacheLogRollDUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache30/CacheLogRollDUnitTest.java
deleted file mode 100644
index 3a0becf..0000000
--- a/geode-core/src/test/java/org/apache/geode/cache30/CacheLogRollDUnitTest.java
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.cache30;
-
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-import static org.junit.Assert.*;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.regex.Pattern;
-
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.internal.logging.InternalLogWriter;
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-import org.apache.geode.test.junit.categories.FlakyTest;
-
-/**
- * Test to make sure cache close is working.
- *
- * @since GemFire 6.5
- */
-@Category(DistributedTest.class)
-public class CacheLogRollDUnitTest extends JUnit4CacheTestCase {
-
-  private void logAndRollAndVerify(String baseLogName, DistributedSystem ds, String mainId)
-      throws FileNotFoundException, IOException {
-    String logfile = baseLogName + ".log";
-    String metaLogFile = "meta-" + baseLogName + "-" + mainId + ".log";
-    String rolledLogFile1 = baseLogName + "-" + mainId + "-01.log";
-    String rolledLogFile2 = baseLogName + "-" + mainId + "-02.log";
-    String META_MARKER_1 = "Switching to log " + baseLogName + ".log";
-    String META_MARKER_2 = "Rolling current log to " + baseLogName + "-" + mainId + "-01.log";
-    String META_MARKER_3 = "Rolling current log to " + baseLogName + "-" + mainId + "-02.log";
-
-    String FIRST_CHILD_MARKER = "hey guys im the first child whatsup";
-    String LOG_NONSENSE =
-        "what is your story what are you doing hey whatsup i can't believe it wow";
-    System.out.println("LOGNAME:" + logfile);
-    /*
-     * 1. Lets assert that the logfile exists and that it is a proper normal logfile 2. Asser that
-     * the meta logfile exists and has good stuff in it 3. Let's log a bunch and show that we
-     * rolled, 4. Show that old file has right old stuff in it 5. Show that new file has right new
-     * stuff in it 6. Show that meta has right stuff in it
-     */
-
-    ds.getLogWriter().info(FIRST_CHILD_MARKER);
-
-    File f = new File(logfile);
-    assertTrue("log-file :" + logfile + " should exist", f.exists());
-
-    File fm = new File(metaLogFile);
-    assertTrue("meta-log-file :" + metaLogFile + " should exist", fm.exists());
-
-    File f1 = new File(rolledLogFile1);
-    assertTrue("child-log-file :" + rolledLogFile1 + " should'nt exist", !f1.exists());
-
-    File f2 = new File(rolledLogFile2);
-    assertTrue("child-log-file2 :" + rolledLogFile2 + " should'nt exist yet", !f2.exists());
-
-
-    String metalog = getLogContents(metaLogFile);
-    assertTrue("metalog(" + metaLogFile + ") should have " + META_MARKER_1 + " in it:\n" + metalog,
-        metalog.indexOf(META_MARKER_1) != -1);
-
-    String mainlog = getLogContents(logfile);
-    assertTrue("log(" + logfile + ") should have " + FIRST_CHILD_MARKER + " in it:\n" + mainlog,
-        mainlog.indexOf(FIRST_CHILD_MARKER) != -1);
-
-    int i = 0;
-    while (i < 100000 && !f2.exists()) {
-      i++;
-      ds.getLogWriter().info(LOG_NONSENSE);
-    }
-
-    assertTrue("child-log-file1 :" + rolledLogFile1 + " should exist now", f1.exists());
-    assertTrue("child-log-file2 :" + rolledLogFile2 + " should exist now", f2.exists());
-
-    metalog = getLogContents(metaLogFile);
-
-    assertTrue("log(" + metaLogFile + ") should have " + META_MARKER_2 + " in it:\n" + metalog,
-        metalog.indexOf(META_MARKER_2) != -1);
-    assertTrue("log(" + metaLogFile + ") should have " + META_MARKER_3 + " in it:\n" + metalog,
-        metalog.indexOf(META_MARKER_3) != -1);
-    assertTrue("log(" + metaLogFile + ") should'nt have " + LOG_NONSENSE + " in it:\n" + metalog,
-        metalog.indexOf(LOG_NONSENSE) == -1);
-
-    {
-      String logChild2 = getLogContents(logfile);
-      assertTrue("log(" + logfile + ") should have " + LOG_NONSENSE + " in it:\n" + logChild2,
-          logChild2.indexOf(LOG_NONSENSE) != -1);
-    }
-
-    {
-      String logChild2 = getLogContents(rolledLogFile1);
-      assertTrue(
-          "log(" + rolledLogFile1 + ") should have " + LOG_NONSENSE + " in it:\n" + logChild2,
-          logChild2.indexOf(LOG_NONSENSE) != -1);
-    }
-
-    {
-      String logChild2 = getLogContents(rolledLogFile2);
-      assertTrue(
-          "log(" + rolledLogFile2 + ") should have " + LOG_NONSENSE + " in it:\n" + logChild2,
-          logChild2.indexOf(LOG_NONSENSE) != -1);
-    }
-  }
-
-  private void SecurityLogAndRollAndVerify(String baseLogName, DistributedSystem ds, String mainId)
-      throws FileNotFoundException, IOException {
-    String logfile = baseLogName + ".log";
-    String sec_logfile = "sec_" + logfile;
-    String metaLogFile = "meta-" + baseLogName + "-" + mainId + ".log";
-    String rolledLogFile1 = baseLogName + "-" + mainId + "-01.log";
-    String rolledLogFile2 = baseLogName + "-" + mainId + "-02.log";
-    String rolledSecLogFile1 = "sec_" + baseLogName + "-" + mainId + "-01.log";
-    String rolledSecLogFile2 = "sec_" + baseLogName + "-" + mainId + "-02.log";
-    String META_MARKER_1 = "Switching to log " + baseLogName + ".log";
-    String META_MARKER_2 = "Rolling current log to " + baseLogName + "-" + mainId + "-01.log";
-    String META_MARKER_3 = "Rolling current log to " + baseLogName + "-" + mainId + "-02.log";
-
-    String FIRST_CHILD_MARKER = "hey guys im the first child whatsup";
-    String LOG_NONSENSE =
-        "what is your story what are you doing hey whatsup i can't believe it wow";
-    System.out.println("LOGNAME:" + logfile + ", SECLOGNAME:" + sec_logfile);
-    /*
-     * 1. Lets assert that the logfile exists and that it is a proper normal logfile 2. Asser that
-     * the meta logfile exists and has good stuff in it 3. Let's log a bunch and show that we
-     * rolled, 4. Show that old file has right old stuff in it 5. Show that new file has right new
-     * stuff in it 6. Show that meta has right stuff in it
-     */
-
-    ds.getLogWriter().info(FIRST_CHILD_MARKER);
-    ds.getSecurityLogWriter().info(FIRST_CHILD_MARKER);
-
-    File f = new File(logfile);
-    File sec_f = new File(sec_logfile);
-    assertTrue("log-file :" + logfile + " should exist", f.exists());
-    assertTrue("security-log-file :" + sec_logfile + " should exist", sec_f.exists());
-
-    File fm = new File(metaLogFile);
-    assertTrue("meta-log-file :" + metaLogFile + " should exist", fm.exists());
-
-    File f1 = new File(rolledLogFile1);
-    File sec_f1 = new File(rolledSecLogFile1);
-    assertTrue("child-log-file :" + rolledLogFile1 + " should'nt exist", !f1.exists());
-    assertTrue("security-child-log-file :" + rolledLogFile1 + " should'nt exist", !sec_f1.exists());
-
-    File f2 = new File(rolledLogFile2);
-    File sec_f2 = new File(rolledSecLogFile2);
-    assertTrue("child-log-file2 :" + rolledLogFile2 + " should'nt exist yet", !f2.exists());
-    assertTrue("security-child-log-file2 :" + rolledSecLogFile2 + " should'nt exist yet",
-        !sec_f2.exists());
-
-
-    String metalog = getLogContents(metaLogFile);
-    assertTrue("metalog(" + metaLogFile + ") should have " + META_MARKER_1 + " in it:\n" + metalog,
-        metalog.indexOf(META_MARKER_1) != -1);
-
-    String mainlog = getLogContents(logfile);
-    assertTrue("log(" + logfile + ") should have " + FIRST_CHILD_MARKER + " in it:\n" + mainlog,
-        mainlog.indexOf(FIRST_CHILD_MARKER) != -1);
-    String sec_mainlog = getLogContents(sec_logfile);
-    assertTrue(
-        "log(" + sec_logfile + ") should have " + FIRST_CHILD_MARKER + " in it:\n" + sec_mainlog,
-        sec_mainlog.indexOf(FIRST_CHILD_MARKER) != -1);
-
-    int i = 0;
-    while (i < 100000 && !f2.exists()) {
-      i++;
-      ds.getLogWriter().info(LOG_NONSENSE);
-    }
-
-    int j = 0;
-    while (j < 100000 && !sec_f2.exists()) {
-      j++;
-      ds.getSecurityLogWriter().info(LOG_NONSENSE);
-    }
-
-    assertTrue("child-log-file1 :" + rolledLogFile1 + " should exist now", f1.exists());
-    assertTrue("child-log-file2 :" + rolledLogFile2 + " should exist now", f2.exists());
-    assertTrue("security-child-log-file1 :" + rolledSecLogFile1 + " should exist now",
-        sec_f1.exists());
-    assertTrue("security-child-log-file2 :" + rolledSecLogFile2 + " should exist now",
-        sec_f2.exists());
-
-    metalog = getLogContents(metaLogFile);
-
-    assertTrue("log(" + metaLogFile + ") should have " + META_MARKER_2 + " in it:\n" + metalog,
-        metalog.indexOf(META_MARKER_2) != -1);
-    assertTrue("log(" + metaLogFile + ") should have " + META_MARKER_3 + " in it:\n" + metalog,
-        metalog.indexOf(META_MARKER_3) != -1);
-    assertTrue("log(" + metaLogFile + ") should'nt have " + LOG_NONSENSE + " in it:\n" + metalog,
-        metalog.indexOf(LOG_NONSENSE) == -1);
-
-    {
-      String logChild2 = getLogContents(logfile);
-      assertTrue("log(" + logfile + ") should have " + LOG_NONSENSE + " in it:\n" + logChild2,
-          logChild2.indexOf(LOG_NONSENSE) != -1);
-      String sec_logChild2 = getLogContents(sec_logfile);
-      assertTrue(
-          "log(" + sec_logfile + ") should have " + LOG_NONSENSE + " in it:\n" + sec_logChild2,
-          sec_logChild2.indexOf(LOG_NONSENSE) != -1);
-    }
-
-    {
-      String logChild2 = getLogContents(rolledLogFile1);
-      assertTrue(
-          "log(" + rolledLogFile1 + ") should have " + LOG_NONSENSE + " in it:\n" + logChild2,
-          logChild2.indexOf(LOG_NONSENSE) != -1);
-      String sec_logChild2 = getLogContents(rolledSecLogFile1);
-      assertTrue("log(" + rolledSecLogFile1 + ") should have " + LOG_NONSENSE + " in it:\n"
-          + sec_logChild2, sec_logChild2.indexOf(LOG_NONSENSE) != -1);
-    }
-
-    {
-      String logChild2 = getLogContents(rolledLogFile2);
-      assertTrue(
-          "log(" + rolledLogFile2 + ") should have " + LOG_NONSENSE + " in it:\n" + logChild2,
-          logChild2.indexOf(LOG_NONSENSE) != -1);
-      String sec_logChild2 = getLogContents(rolledSecLogFile2);
-      assertTrue("log(" + rolledSecLogFile2 + ") should have " + LOG_NONSENSE + " in it:\n"
-          + sec_logChild2, sec_logChild2.indexOf(LOG_NONSENSE) != -1);
-    }
-  }
-
-  @Test
-  public void testDiskSpace() throws Exception {
-    Properties props = new Properties();
-    String baseLogName = "diskarito";
-    String logfile = baseLogName + ".log";
-    props.put(LOG_FILE, logfile);
-    props.put(LOG_FILE_SIZE_LIMIT, "1");
-    DistributedSystem ds = this.getSystem(props);
-    props.put(LOG_DISK_SPACE_LIMIT, "200");
-    for (int i = 0; i < 10; i++) {
-      ds = this.getSystem(props);
-      ds.disconnect();
-    }
-
-    /*
-     * This was throwing NPEs until my fix...
-     */
-  }
-
-  @Category(FlakyTest.class) // GEODE-674: possible disk pollution, file size sensitive
-  @Test
-  public void testSimpleStartRestartWithRolling() throws Exception {
-    Properties props = new Properties();
-    String baseLogName = "restarto";
-    String logfile = baseLogName + ".log";
-    props.put(LOG_FILE, logfile);
-    props.put(LOG_FILE_SIZE_LIMIT, "1");
-    props.put(LOG_DISK_SPACE_LIMIT, "200");
-    props.put(LOG_LEVEL, "config");
-    InternalDistributedSystem ids = getSystem(props);
-    assertEquals(InternalLogWriter.INFO_LEVEL,
-        ((InternalLogWriter) ids.getLogWriter()).getLogWriterLevel());
-    ids.disconnect();
-    String mainId;
-    {
-      final Pattern mainIdPattern = Pattern.compile("meta-" + baseLogName + "-\\d\\d.log");
-      File[] metaLogs = new File(".").listFiles(new FilenameFilter() {
-        public boolean accept(File d, String name) {
-          return mainIdPattern.matcher(name).matches();
-        }
-      });
-      assertEquals(1, metaLogs.length);
-      String f = metaLogs[0].getName();
-      int idx = f.lastIndexOf("-");
-      int idx2 = f.lastIndexOf(".");
-      mainId = f.substring(idx + 1, idx2);
-    }
-    String metaName = "meta-" + baseLogName + "-" + mainId + ".log";
-    File fm = new File(metaName);
-    assertTrue("Ok so metalog:" + metaName + " better exist:", fm.exists());
-    for (int i = 1; i < 10; i++) {
-      int mainInt = Integer.parseInt(mainId) + (i);
-      String myid;
-      if (mainInt < 10) {
-        myid = "0" + mainInt;
-      } else {
-        myid = "" + mainInt;
-      }
-      String oldMain;
-      if (mainInt < 11) {
-        oldMain = "0" + (mainInt - 1);
-      } else {
-        oldMain = "" + (mainInt - 1);
-      }
-      String lfold = "meta-" + baseLogName + "-" + (oldMain) + ".log";
-      File fold = new File(lfold);
-      assertTrue(
-          "before we even get going here[" + i + "] mainInt:" + mainInt + " myid:" + myid + " "
-              + lfold + " should exist the metaname was :" + metaName + " and it should match that",
-          fold.exists());
-      String lf = "meta-" + baseLogName + "-" + myid + ".log";
-      String lfl = baseLogName + "-" + (oldMain) + "-01.log";
-      File f1m = new File(lf);
-      File f1l = new File(lfl);
-      assertTrue(!f1m.exists());
-      assertTrue(!f1l.exists());
-      DistributedSystem ds = this.getSystem(props);
-      assertTrue("We are hoping that:" + lf + " exists", f1m.exists());
-      assertTrue("We are hoping that:" + lfl + " exists", f1l.exists());
-      ds.disconnect();
-    }
-  }
-
-  @Category(FlakyTest.class) // GEODE-677: possible disk pollution, file size sensitive
-  @Test
-  public void testStartWithRollingThenRestartWithRolling() throws Exception {
-    Properties props = new Properties();
-    String baseLogName = "biscuits";
-    String logfile = baseLogName + ".log";
-    props.put(LOG_FILE, logfile);
-    props.put(LOG_FILE_SIZE_LIMIT, "1");
-    props.put(LOG_LEVEL, "config");
-    DistributedSystem ds = getSystem(props);
-    InternalDistributedSystem ids = (InternalDistributedSystem) ds;
-    assertEquals(InternalLogWriter.INFO_LEVEL,
-        ((InternalLogWriter) ids.getLogWriter()).getLogWriterLevel());
-
-    // Lets figure out the mainId we start with
-    String mainId;
-    {
-      final Pattern mainIdPattern = Pattern.compile("meta-" + baseLogName + "-\\d\\d\\d*.log");
-      File[] metaLogs = new File(".").listFiles(new FilenameFilter() {
-        public boolean accept(File d, String name) {
-          return mainIdPattern.matcher(name).matches();
-        }
-      });
-      assertEquals(1, metaLogs.length);
-      String f = metaLogs[0].getName();
-      int idx = f.lastIndexOf("-");
-      int idx2 = f.lastIndexOf(".");
-      mainId = f.substring(idx + 1, idx2);
-    }
-    logAndRollAndVerify(baseLogName, ds, mainId);
-    /*
-     * Ok now we have rolled and yada yada. Let's disconnect and reconnect with same name!
-     */
-    int dsId = System.identityHashCode(ds);
-    props.put(LOG_DISK_SPACE_LIMIT, "200");
-
-    File f1m = new File(logfile);
-    assertTrue(f1m.exists());
-    File f1c1 = new File(baseLogName + "-" + mainId + "-01.log");
-    assertTrue(f1c1.exists());
-    File f1c2 = new File(baseLogName + "-" + mainId + "-02.log");
-    assertTrue(f1c2.exists());
-
-    File f1c3 = new File(baseLogName + "-" + mainId + "-03.log");
-    assertTrue(!f1c3.exists());
-
-    String nextMainId;
-    {
-      int mId = Integer.parseInt(mainId);
-      mId++;
-      StringBuffer sb = new StringBuffer();
-      if (mId < 10) {
-        sb.append('0');
-      }
-      sb.append(mId);
-      nextMainId = sb.toString();
-    }
-    File f2c1 = new File(baseLogName + "-" + nextMainId + "-01.log");
-    assertTrue(!f2c1.exists());
-
-
-    /*
-     * Lets just make sure all the proper files exist
-     */
-    ds = this.getSystem(props);
-    int dsId2 = System.identityHashCode(ds);
-    assertTrue("This should be a new ds!", dsId != dsId2);
-    /*
-     * creating the new system should have rolled the old rolling log
-     * (biscuits.log->biscuits-02-01.log)
-     * 
-     */
-    // The following assert does not work on Windows because
-    // we can't rename the last biscuits.log because it is still open
-    // The DistributedSystem disconnect is not closing the logger enough
-    // so that it can be renamed.
-    // Reenable this assertion once this issue (bug 42176) is fixed.
-    assertTrue(f1c3.exists());
-  }
-
-  @Category(FlakyTest.class) // GEODE-676: possible disk pollution, file size sensitive
-  @Test
-  public void testLogFileLayoutAndRolling() throws Exception {
-    String baseLogName = "tacos";
-    Properties props = new Properties();
-
-    String logfile = baseLogName + ".log";
-    props.put(LOG_FILE, logfile);
-    props.put(LOG_FILE_SIZE_LIMIT, "1");
-    props.put(LOG_LEVEL, "config");
-
-    DistributedSystem ds = getSystem(props);
-    InternalDistributedSystem ids = (InternalDistributedSystem) ds;
-    assertEquals(InternalLogWriter.INFO_LEVEL,
-        ((InternalLogWriter) ids.getLogWriter()).getLogWriterLevel());
-
-    // Lets figure out the mainId we start with
-    String mainId;
-    {
-      final Pattern mainIdPattern = Pattern.compile("meta-" + baseLogName + "-\\d+.log");
-      File[] metaLogs = new File(".").listFiles(new FilenameFilter() {
-        public boolean accept(File d, String name) {
-          return mainIdPattern.matcher(name).matches();
-        }
-      });
-      assertEquals(1, metaLogs.length);
-      String f = metaLogs[0].getName();
-      int idx = f.lastIndexOf("-");
-      int idx2 = f.lastIndexOf(".");
-      mainId = f.substring(idx + 1, idx2);
-    }
-    ds.getProperties();
-    logAndRollAndVerify(baseLogName, ds, mainId);
-  }
-
-  @Category(FlakyTest.class) // GEODE-675: possible disk pollution, file size sensitive
-  @Test
-  public void testSecurityLogFileLayoutAndRolling() throws Exception {
-    String baseLogName = "securitytacos";
-    Properties props = new Properties();
-
-    String logfile = baseLogName + ".log";
-    String sec_logfile = "sec_" + baseLogName + ".log";
-    props.put(LOG_FILE, logfile);
-    props.put(LOG_FILE_SIZE_LIMIT, "1");
-    props.put(LOG_LEVEL, "config");
-    props.put(SECURITY_LOG_FILE, sec_logfile);
-    props.put(SECURITY_LOG_LEVEL, "config");
-
-    DistributedSystem ds = getSystem(props);
-    InternalDistributedSystem ids = (InternalDistributedSystem) ds;
-    assertEquals(InternalLogWriter.INFO_LEVEL,
-        ((InternalLogWriter) ids.getLogWriter()).getLogWriterLevel());
-    assertEquals(InternalLogWriter.INFO_LEVEL,
-        ((InternalLogWriter) ids.getSecurityLogWriter()).getLogWriterLevel());
-
-    // Lets figure out the mainId we start with
-    String mainId;
-    {
-      final Pattern mainIdPattern = Pattern.compile("meta-" + baseLogName + "-\\d+.log");
-      File[] metaLogs = new File(".").listFiles(new FilenameFilter() {
-        public boolean accept(File d, String name) {
-          return mainIdPattern.matcher(name).matches();
-        }
-      });
-      assertEquals(1, metaLogs.length);
-      String f = metaLogs[0].getName();
-      int idx = f.lastIndexOf("-");
-      int idx2 = f.lastIndexOf(".");
-      mainId = f.substring(idx + 1, idx2);
-    }
-    ds.getProperties();
-    SecurityLogAndRollAndVerify(baseLogName, ds, mainId);
-  }
-
-  String getLogContents(String logfile) throws FileNotFoundException, IOException {
-    File f = new File(logfile);
-    BufferedReader reader = new BufferedReader(new FileReader(f));
-    StringBuffer fileData = new StringBuffer();
-    int numRead = 0;
-    char[] buf = new char[1024];
-    while ((numRead = reader.read(buf)) != -1) {
-      String readData = String.valueOf(buf, 0, numRead);
-      fileData.append(readData);
-      buf = new char[1024];
-    }
-    return fileData.toString();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/b48f5a35/geode-core/src/test/java/org/apache/geode/internal/logging/CacheLogRollingIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/logging/CacheLogRollingIntegrationTest.java b/geode-core/src/test/java/org/apache/geode/internal/logging/CacheLogRollingIntegrationTest.java
new file mode 100644
index 0000000..7176ae3
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/logging/CacheLogRollingIntegrationTest.java
@@ -0,0 +1,349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.logging;
+
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+import static org.assertj.core.api.Assertions.*;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.Properties;
+import java.util.regex.Pattern;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import org.apache.geode.LogWriter;
+import org.apache.geode.distributed.DistributedSystem;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+
+/**
+ * Integration tests for log rolling with cache lifecycle.
+ *
+ * @since GemFire 6.5
+ */
+@Category(IntegrationTest.class)
+public class CacheLogRollingIntegrationTest {
+
+  private static final int MAX_LOG_STATEMENTS = 100000;
+  private static final String SECURITY_PREFIX = "security_";
+
+  private String baseName;
+  private File dir;
+  private File logFile;
+  private File securityLogFile;
+  private Pattern mainIdPattern;
+  private DistributedSystem system;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void before() throws Exception {
+    this.baseName = this.testName.getMethodName();
+    this.dir = this.temporaryFolder.getRoot();
+    this.logFile = new File(this.dir, logFileName());
+    this.securityLogFile = new File(this.dir, securityLogFileName());
+    this.mainIdPattern = Pattern.compile("meta-" + this.baseName + "-\\d\\d.log");
+  }
+
+  @After
+  public void after() throws Exception {
+    if (this.system != null) {
+      this.system.disconnect();
+    }
+  }
+
+  @Test
+  public void testSimpleStartRestartWithRolling() throws Exception {
+    Properties config = new Properties();
+    config.put(LOG_FILE, this.logFile.getAbsolutePath());
+    config.put(LOG_FILE_SIZE_LIMIT, "1");
+    config.put(LOG_DISK_SPACE_LIMIT, "200");
+
+    this.system = DistributedSystem.connect(config);
+    this.system.disconnect();
+
+    for (int mainInt = 2; mainInt <= 4; mainInt++) {
+      assertThat(metaFile(mainInt - 1)).exists();
+
+      File newMetaFile = metaFile(mainInt);
+      File newRolledLogFile = childFile(mainInt - 1, 1);
+
+      assertThat(newMetaFile).doesNotExist();
+      assertThat(newRolledLogFile).doesNotExist();
+
+      this.system = DistributedSystem.connect(config);
+
+      assertThat(newMetaFile).exists();
+      assertThat(newRolledLogFile).exists();
+
+      this.system.disconnect();
+    }
+  }
+
+  @Test
+  public void testStartWithRollingThenRestartWithRolling() throws Exception {
+    Properties config = new Properties();
+    config.put(LOG_FILE, this.logFile.getAbsolutePath());
+    config.put(LOG_FILE_SIZE_LIMIT, "1");
+
+    this.system = DistributedSystem.connect(config);
+
+    logAndRollAndVerify(1);
+
+    DistributedSystem firstSystem = this.system;
+
+    assertThat(this.logFile).exists();
+    assertThat(childFile(1, 1)).exists();
+    assertThat(childFile(1, 2)).exists();
+    assertThat(childFile(1, 3)).doesNotExist();
+    assertThat(childFile(2, 1)).doesNotExist();
+
+    this.system.disconnect();
+
+    config.put(LOG_DISK_SPACE_LIMIT, "200");
+    this.system = DistributedSystem.connect(config);
+
+    assertThat(this.system).isNotSameAs(firstSystem);
+    assertThat(childFile(1, 3)).exists();
+  }
+
+  @Test
+  public void testLogFileLayoutAndRolling() throws Exception {
+    Properties config = new Properties();
+    config.put(LOG_FILE, this.logFile.getAbsolutePath());
+    config.put(LOG_FILE_SIZE_LIMIT, "1");
+
+    this.system = DistributedSystem.connect(config);
+
+    logAndRollAndVerify(1);
+  }
+
+  @Test
+  public void testSecurityLogFileLayoutAndRolling() throws Exception {
+    Properties config = new Properties();
+    config.put(LOG_FILE, this.logFile.getAbsolutePath());
+    config.put(LOG_FILE_SIZE_LIMIT, "1");
+    config.put(SECURITY_LOG_FILE, this.securityLogFile.getAbsolutePath());
+
+    this.system = DistributedSystem.connect(config);
+
+    securityLogAndRollAndVerify(1);
+  }
+
+  @Test
+  public void with_logFileSizeLimit_should_createMetaLogFile() throws Exception {
+    Properties config = new Properties();
+    config.put(LOG_FILE, this.logFile.getAbsolutePath());
+    config.put(LOG_FILE_SIZE_LIMIT, "1");
+
+    this.system = DistributedSystem.connect(config);
+
+    File[] metaLogsMatched =
+        this.dir.listFiles((dir, name) -> mainIdPattern.matcher(name).matches());
+    assertThat(metaLogsMatched).hasSize(1);
+
+    File metaLogFile = metaFile(1);
+    assertThat(metaLogFile).exists();
+  }
+
+  @Test
+  public void without_logFileSizeLimit_shouldNot_createMetaLogFile() throws Exception {
+    Properties config = new Properties();
+    config.put(LOG_FILE, this.logFile.getAbsolutePath());
+
+    this.system = DistributedSystem.connect(config);
+
+    File[] metaLogsMatched =
+        this.dir.listFiles((dir, name) -> mainIdPattern.matcher(name).matches());
+    assertThat(metaLogsMatched).hasSize(0);
+
+    File metaLogFile = metaFile(12);
+    assertThat(metaLogFile).doesNotExist();
+  }
+
+  private String readContents(final File file) throws IOException {
+    assertThat(file).exists();
+
+    BufferedReader reader = new BufferedReader(new FileReader(file));
+    StringBuffer buffer = new StringBuffer();
+    int numRead;
+    char[] chars = new char[1024];
+
+    while ((numRead = reader.read(chars)) != -1) {
+      String readData = String.valueOf(chars, 0, numRead);
+      buffer.append(readData);
+      chars = new char[1024];
+    }
+
+    return buffer.toString();
+  }
+
+  /**
+   * 1. Lets assert that the logfile exists and that it is a proper normal logfile<br>
+   * 2. Assert that the meta logfile exists and has good stuff in it<br>
+   * 3. Let's log a bunch and show that we rolled<br>
+   * 4. Show that old file has right old stuff in it<br>
+   * 5. Show that new file has right new stuff in it<br>
+   * 6. Show that meta has right stuff in it<br>
+   */
+  private void logAndRollAndVerify(final int mainId) throws IOException {
+    File metaLogFile = metaFile(mainId);
+    File childLogFile01 = childFile(mainId, 1);
+    File childLogFile02 = childFile(mainId, 2);
+
+    String switchingToLog = "Switching to log " + this.logFile;
+    String rollingCurrentLogTo01 = "Rolling current log to " + childLogFile01;
+    String rollingCurrentLogTo02 = "Rolling current log to " + childLogFile02;
+
+    String messageInChild = "hey guys im the first child";
+    String message = "hey whatsup i can't believe it wow";
+
+    this.system.getLogWriter().info(messageInChild);
+
+    assertThat(this.logFile).exists();
+    assertThat(metaLogFile).exists();
+    assertThat(childLogFile01).doesNotExist();
+    assertThat(childLogFile02).doesNotExist();
+    assertThat(readContents(metaLogFile)).contains(switchingToLog);
+    assertThat(readContents(this.logFile)).contains(messageInChild);
+
+    logUntilFileExists(this.system.getLogWriter(), message, childLogFile02);
+
+    assertThat(childLogFile01).exists();
+    assertThat(childLogFile02).exists();
+
+    String metaLogContents = readContents(metaLogFile);
+    assertThat(metaLogContents).contains(rollingCurrentLogTo01);
+    assertThat(metaLogContents).contains(rollingCurrentLogTo02);
+    assertThat(metaLogContents).doesNotContain(message);
+
+    assertThat(readContents(this.logFile)).contains(message);
+    assertThat(readContents(childLogFile01)).contains(message);
+    assertThat(readContents(childLogFile02)).contains(message);
+  }
+
+  /**
+   * 1. Lets assert that the logfile exists and that it is a proper normal logfile<br>
+   * 2. Assert that the meta logfile exists and has good stuff in it<br>
+   * 3. Let's log a bunch and show that we rolled<br>
+   * 4. Show that old file has right old stuff in it<br>
+   * 5. Show that new file has right new stuff in it<br>
+   * 6. Show that meta has right stuff in it<br>
+   */
+  private void securityLogAndRollAndVerify(final int mainId) throws IOException {
+    File metaLogFile = metaFile(mainId);
+    File childLogFile01 = childFile(mainId, 1);
+    File childLogFile02 = childFile(mainId, 2);
+    File childSecurityLogFile01 = childSecurityFile(mainId, 1);
+    File childSecurityLogFile02 = childSecurityFile(mainId, 2);
+
+    String switchingToLog = "Switching to log " + this.logFile;
+    String rollingCurrentLogTo01 = "Rolling current log to " + childLogFile01;
+    String rollingCurrentLogTo02 = "Rolling current log to " + childLogFile02;
+
+    String messageInChild = "hey guys im the first child";
+    String messageInSecurityChild = "hey guys im the first security child";
+    String message = "hey whatsup i can't believe it wow";
+
+    this.system.getLogWriter().info(messageInChild);
+    this.system.getSecurityLogWriter().info(messageInSecurityChild);
+
+    assertThat(readContents(this.logFile)).contains(messageInChild)
+        .doesNotContain(messageInSecurityChild);
+    assertThat(readContents(this.securityLogFile)).contains(messageInSecurityChild)
+        .doesNotContain(messageInChild);
+
+    assertThat(readContents(metaLogFile)).contains(switchingToLog);
+
+    assertThat(childLogFile01).doesNotExist();
+    assertThat(childSecurityLogFile01).doesNotExist();
+    assertThat(childLogFile02).doesNotExist();
+    assertThat(childSecurityLogFile02).doesNotExist();
+
+    logUntilFileExists(this.system.getLogWriter(), message, childLogFile02);
+    logUntilFileExists(this.system.getSecurityLogWriter(), message, childSecurityLogFile02);
+
+    assertThat(readContents(this.logFile)).contains(message);
+    assertThat(readContents(this.securityLogFile)).contains(message);
+
+    String metaLogContents = readContents(metaLogFile);
+    assertThat(metaLogContents).contains(rollingCurrentLogTo01);
+    assertThat(metaLogContents).contains(rollingCurrentLogTo02);
+    assertThat(metaLogContents).doesNotContain(message);
+
+    assertThat(readContents(childLogFile01)).contains(message);
+    assertThat(readContents(childSecurityLogFile01)).contains(message);
+    assertThat(readContents(childLogFile02)).contains(message);
+    assertThat(readContents(childSecurityLogFile02)).contains(message);
+  }
+
+  private void logUntilFileExists(final LogWriter logWriter, final String message,
+      final File logFile) {
+    for (int i = 0; i < MAX_LOG_STATEMENTS && !logFile.exists(); i++) {
+      logWriter.info(message);
+    }
+    assertThat(logFile).exists();
+  }
+
+  private String formatId(final int id) {
+    return String.format("%02d", id);
+  }
+
+  private String logFileName() {
+    return this.baseName + ".log";
+  }
+
+  private String securityLogFileName() {
+    return SECURITY_PREFIX + this.baseName + ".log";
+  }
+
+  private String metaFileName(int mainId) {
+    return "meta-" + this.baseName + "-" + formatId(mainId) + ".log";
+  }
+
+  private File metaFile(int mainId) {
+    return new File(this.dir, metaFileName(mainId));
+  }
+
+  private String childFileName(int mainId, int childId) {
+    return this.baseName + "-" + formatId(mainId) + "-" + formatId(childId) + ".log";
+  }
+
+  private File childFile(int mainId, int childId) {
+    return new File(this.dir, childFileName(mainId, childId));
+  }
+
+  private String childSecurityFileName(int mainId, int childId) {
+    return SECURITY_PREFIX + this.baseName + "-" + formatId(mainId) + "-" + formatId(childId)
+        + ".log";
+  }
+
+  private File childSecurityFile(int mainId, int childId) {
+    return new File(this.dir, childSecurityFileName(mainId, childId));
+  }
+
+}


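The rewritten test's logUntilFileExists() bounds the formerly flaky wait by a statement count. A purely illustrative alternative sketch that bounds it by time instead, assuming an Awaitility 2.x test dependency (not something this commit adds):

    import java.io.File;
    import java.util.concurrent.TimeUnit;
    import org.awaitility.Awaitility;
    import org.apache.geode.LogWriter;

    final class LogRollAwaitSketch {
      // Keep logging until the expected rolled file appears, failing on a
      // hard timeout rather than after a fixed number of log statements.
      static void logUntilFileExists(LogWriter logWriter, String message, File rolledFile) {
        Awaitility.await().atMost(2, TimeUnit.MINUTES).until(() -> {
          logWriter.info(message);
          return rolledFile.exists();
        });
      }
    }
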
[06/10] geode git commit: GEODE-2639: Added a wait for flush to avoid flakiness in test

Posted by kl...@apache.org.
GEODE-2639: Added a wait for flush to avoid flakiness in test


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/89847ddb
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/89847ddb
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/89847ddb

Branch: refs/heads/feature/GEODE-2645
Commit: 89847ddb5eafa3f9e1d918d3d3b99a631ab99608
Parents: d8a8973
Author: Jason Huynh <hu...@gmail.com>
Authored: Tue Mar 14 10:16:38 2017 -0700
Committer: Jason Huynh <hu...@gmail.com>
Committed: Tue Mar 14 11:47:55 2017 -0700

----------------------------------------------------------------------
 .../test/java/org/apache/geode/cache/lucene/EvictionDUnitTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/89847ddb/geode-lucene/src/test/java/org/apache/geode/cache/lucene/EvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/EvictionDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/EvictionDUnitTest.java
index 0fb0d9d..fc2be39 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/EvictionDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/EvictionDUnitTest.java
@@ -105,7 +105,7 @@ public class EvictionDUnitTest extends LuceneQueriesAccessorBase {
       Region region = cache.getRegion(REGION_NAME);
       IntStream.range(0, NUM_BUCKETS).forEach(i -> region.put(i, new TestObject("hello world")));
     });
-
+    waitForFlushBeforeExecuteTextSearch(accessor, 60000);
     dataStore1.invoke(() -> {
       try {
         getCache().getResourceManager().setEvictionHeapPercentage(INITIAL_EVICTION_HEAP_PERCENTAGE);


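waitForFlushBeforeExecuteTextSearch() is a helper in the test base class; the underlying idea is simply to block until the index's asynchronous queue has drained before querying, so the search sees every put. A generic sketch of that idea (pendingEvents is an assumed hook, e.g. the size of the async event queue feeding the Lucene index, and Awaitility is an assumed test dependency):

    import java.util.concurrent.TimeUnit;
    import java.util.function.IntSupplier;
    import org.awaitility.Awaitility;

    final class FlushAwaitSketch {
      // Block until no events remain queued for indexing, or time out.
      static void waitForFlush(IntSupplier pendingEvents, long timeoutMillis) {
        Awaitility.await()
            .atMost(timeoutMillis, TimeUnit.MILLISECONDS)
            .until(() -> pendingEvents.getAsInt() == 0);
      }
    }
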
[08/10] geode git commit: GEODE-2599: fix for "null" in string of dots

Posted by kl...@apache.org.
GEODE-2599: fix for "null" in string of dots

Check for string "null" in server/locator status message

Refactor server and locator launcher wait loops by using Process.isAlive
instead of Process.exitValue


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/6b4e4f2c
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/6b4e4f2c
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/6b4e4f2c

Branch: refs/heads/feature/GEODE-2645
Commit: 6b4e4f2c532c478d8a7a48bc5ac3a3152fc2e208
Parents: f9fa3e3
Author: Ken Howe <kh...@pivotal.io>
Authored: Fri Mar 10 15:09:04 2017 -0800
Committer: Ken Howe <kh...@pivotal.io>
Committed: Tue Mar 14 14:24:32 2017 -0700

----------------------------------------------------------------------
 .../cli/commands/LauncherLifecycleCommands.java | 53 ++++++++------------
 1 file changed, 22 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/6b4e4f2c/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
index d42d75e..3ad93ce 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
@@ -384,21 +384,9 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
                 new File(locatorLauncher.getWorkingDirectory()))),
             null);
 
+        locatorState = locatorStatus(workingDirectory, memberName);
         do {
-          try {
-            final int exitValue = locatorProcess.exitValue();
-
-            stderrReader.join(PROCESS_STREAM_READER_JOIN_TIMEOUT_MILLIS); // was Long.MAX_VALUE
-
-            // Gfsh.println(message);
-
-            return ResultBuilder.createShellClientErrorResult(
-                String.format(CliStrings.START_LOCATOR__PROCESS_TERMINATED_ABNORMALLY_ERROR_MESSAGE,
-                    exitValue, locatorLauncher.getWorkingDirectory(), message.toString()));
-          } catch (IllegalThreadStateException ignore) {
-            // the IllegalThreadStateException is expected; it means the Locator's process has not
-            // terminated,
-            // and basically should not
+          if (locatorProcess.isAlive()) {
             Gfsh.print(".");
 
             synchronized (this) {
@@ -410,12 +398,19 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
             String currentLocatorStatusMessage = locatorState.getStatusMessage();
 
             if (isStartingOrNotResponding(locatorState.getStatus())
-                && !(StringUtils.isBlank(currentLocatorStatusMessage) || currentLocatorStatusMessage
-                    .equalsIgnoreCase(previousLocatorStatusMessage))) {
+                && !(StringUtils.isBlank(currentLocatorStatusMessage)
+                    || currentLocatorStatusMessage.equalsIgnoreCase(previousLocatorStatusMessage)
+                    || currentLocatorStatusMessage.trim().toLowerCase().equals("null"))) {
               Gfsh.println();
               Gfsh.println(currentLocatorStatusMessage);
               previousLocatorStatusMessage = currentLocatorStatusMessage;
             }
+          } else {
+            final int exitValue = locatorProcess.exitValue();
+
+            return ResultBuilder.createShellClientErrorResult(
+                String.format(CliStrings.START_LOCATOR__PROCESS_TERMINATED_ABNORMALLY_ERROR_MESSAGE,
+                    exitValue, locatorLauncher.getWorkingDirectory(), message.toString()));
           }
         } while (!(registeredLocatorSignalListener && locatorSignalListener.isSignaled())
             && isStartingOrNotResponding(locatorState.getStatus()));
@@ -1646,21 +1641,9 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
                 new File(serverLauncher.getWorkingDirectory()))),
             null);
 
+        serverState = serverStatus(workingDirectory, memberName);
         do {
-          try {
-            final int exitValue = serverProcess.exitValue();
-
-            stderrReader.join(PROCESS_STREAM_READER_JOIN_TIMEOUT_MILLIS); // was Long.MAX_VALUE
-
-            // Gfsh.println(message);
-
-            return ResultBuilder.createShellClientErrorResult(
-                String.format(CliStrings.START_SERVER__PROCESS_TERMINATED_ABNORMALLY_ERROR_MESSAGE,
-                    exitValue, serverLauncher.getWorkingDirectory(), message.toString()));
-          } catch (IllegalThreadStateException ignore) {
-            // the IllegalThreadStateException is expected; it means the Server's process has not
-            // terminated,
-            // and should not
+          if (serverProcess.isAlive()) {
             Gfsh.print(".");
 
             synchronized (this) {
@@ -1673,11 +1656,19 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
 
             if (isStartingOrNotResponding(serverState.getStatus())
                 && !(StringUtils.isBlank(currentServerStatusMessage)
-                    || currentServerStatusMessage.equalsIgnoreCase(previousServerStatusMessage))) {
+                    || currentServerStatusMessage.equalsIgnoreCase(previousServerStatusMessage)
+                    || currentServerStatusMessage.trim().toLowerCase().equals("null"))) {
               Gfsh.println();
               Gfsh.println(currentServerStatusMessage);
               previousServerStatusMessage = currentServerStatusMessage;
             }
+          } else {
+            final int exitValue = serverProcess.exitValue();
+
+            return ResultBuilder.createShellClientErrorResult(
+                String.format(CliStrings.START_SERVER__PROCESS_TERMINATED_ABNORMALLY_ERROR_MESSAGE,
+                    exitValue, serverLauncher.getWorkingDirectory(), message.toString()));
+
           }
         } while (!(registeredServerSignalListener && serverSignalListener.isSignaled())
             && isStartingOrNotResponding(serverState.getStatus()));


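The refactor replaces the exception-driven liveness check with Java 8's Process.isAlive(), which inverts the loop: the still-running case becomes the plain branch, and exitValue() is only read once the process has actually exited. A small stand-alone illustration of the two styles (the 'sleep' command assumes a POSIX environment):

    import java.util.concurrent.TimeUnit;

    public class ProcessPollSketch {
      public static void main(String[] args) throws Exception {
        Process process = new ProcessBuilder("sleep", "1").start();

        // Pre-Java-8 style: exitValue() throws IllegalThreadStateException
        // while the process is running, so a liveness check needed try/catch.
        try {
          System.out.println("already exited: " + process.exitValue());
        } catch (IllegalThreadStateException expectedWhileRunning) {
          System.out.println("still running");
        }

        // Java 8+ style, as in the refactored wait loops: a plain condition.
        while (process.isAlive()) {
          System.out.print(".");
          TimeUnit.MILLISECONDS.sleep(100);
        }
        System.out.println();
        System.out.println("exited with " + process.exitValue());
      }
    }
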
[03/10] geode git commit: GEODE-2303 Add eccn text to README.md

Posted by kl...@apache.org.
GEODE-2303 Add eccn text to README.md


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/3c212fb1
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/3c212fb1
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/3c212fb1

Branch: refs/heads/feature/GEODE-2645
Commit: 3c212fb11c8220f19750a154d4e31e38713b52f5
Parents: 55a2a3c
Author: Anthony Baker <ab...@apache.org>
Authored: Mon Mar 6 17:01:39 2017 -0800
Committer: Anthony Baker <ab...@apache.org>
Committed: Mon Mar 13 20:32:47 2017 -0700

----------------------------------------------------------------------
 README.md | 32 +++++++++++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/3c212fb1/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index 0909364..bc9e519 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@
 5. [Application Development](#development)
 6. [Documentation](http://geode.apache.org/docs/)
 7. [Wiki](https://cwiki.apache.org/confluence/display/GEODE/Index)
-
+8. [Export Control](#export)
 
 ## <a name="overview"></a>Overview
 
@@ -199,3 +199,33 @@ The following libraries are available external to the Apache Geode project:
 * [Spring Data GemFire](http://projects.spring.io/spring-data-gemfire/)
 * [Spring Cache](http://docs.spring.io/spring/docs/current/spring-framework-reference/html/cache.html)
 * [Python](https://github.com/gemfire/py-gemfire-rest)
+
+## <a name="export"></a>Export Control
+
+This distribution includes cryptographic software.
+The country in which you currently reside may have restrictions
+on the import, possession, use, and/or re-export to another country,
+of encryption software. BEFORE using any encryption software,
+please check your country's laws, regulations and policies
+concerning the import, possession, or use, and re-export of
+encryption software, to see if this is permitted.
+See <http://www.wassenaar.org/> for more information.
+
+The U.S. Government Department of Commerce, Bureau of Industry and Security (BIS),
+has classified this software as Export Commodity Control Number (ECCN) 5D002.C.1,
+which includes information security software using or performing
+cryptographic functions with asymmetric algorithms.
+The form and manner of this Apache Software Foundation distribution makes
+it eligible for export under the License Exception
+ENC Technology Software Unrestricted (TSU) exception
+(see the BIS Export Administration Regulations, Section 740.13)
+for both object code and source code.
+
+The following provides more details on the included cryptographic software:
+
+* Apache Geode is designed to be used with
+  [Java Secure Socket Extension](https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html) (JSSE) and
+  [Java Cryptography Extension](http://docs.oracle.com/javase/8/docs/technotes/guides/security/crypto/CryptoSpec.html) (JCE).
+  The [JCE Unlimited Strength Jurisdiction Policy](http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html)
+  may need to be installed separately to use keystore passwords with 7 or more characters.
+* Apache Geode links to and uses [OpenSSL](https://www.openssl.org/) ciphers.
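
For readers verifying the policy note above, the standard JCE API can report the effective limit. A quick probe using only JDK classes, not part of Geode itself:

import javax.crypto.Cipher;

// Quick probe for the JCE policy note above (standard JDK API, not Geode code):
// with the unlimited-strength policy installed this prints Integer.MAX_VALUE;
// with the default policy on older JDKs, AES is capped at 128.
public class JcePolicyProbe {
  public static void main(String[] args) throws Exception {
    System.out.println("Max allowed AES key length: " + Cipher.getMaxAllowedKeyLength("AES"));
  }
}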


[07/10] geode git commit: GEODE-2641: minor help string changes

Posted by kl...@apache.org.
GEODE-2641: minor help string changes


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/f9fa3e35
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/f9fa3e35
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/f9fa3e35

Branch: refs/heads/feature/GEODE-2645
Commit: f9fa3e35bb60cb67d1d709d1ee080f61d6e60a1d
Parents: 057f60b
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Tue Mar 14 09:34:26 2017 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Tue Mar 14 13:16:22 2017 -0700

----------------------------------------------------------------------
 .../geode/management/internal/cli/i18n/CliStrings.java       | 4 ++--
 .../internal/cli/commands/golden-help-offline.properties     | 8 +++++---
 2 files changed, 7 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/f9fa3e35/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
index b835e8b..5b1f089 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/i18n/CliStrings.java
@@ -1391,7 +1391,7 @@ public class CliStrings {
   public static final String EXPORT_LOGS__HELP = "Export the log files for a member or members.";
   public static final String EXPORT_LOGS__DIR = "dir";
   public static final String EXPORT_LOGS__DIR__HELP =
-      "Local directory to which log files will be written. This is only used when you are exporting logs using http connection. If not specified, user.dir will be used.";
+      "Local directory to which logs will be written. This is used only when you are exporting logs using an http connection. If not specified, logs are written to the location specified by the user.dir system property.";
   public static final String EXPORT_LOGS__MEMBER = "member";
   public static final String EXPORT_LOGS__MEMBER__HELP =
       "Name/Id of the member whose log files will be exported.";
@@ -1413,7 +1413,7 @@ public class CliStrings {
       "Log entries that occurred before this time will be exported. The default is no limit. Format: yyyy/MM/dd/HH/mm/ss/SSS/z OR yyyy/MM/dd";
   public static final String EXPORT_LOGS__MERGELOG = "merge-log";
   public static final String EXPORT_LOGS__MERGELOG__HELP =
-      "Whether to merge logs after exporting to the target directory.";
+      "Whether to merge logs after exporting to the target directory. Deprecated: Since Geode1.2, no longer used.";
   public static final String EXPORT_LOGS__MSG__CANNOT_MERGE =
       "Could not merge the files in target directory";
   public static final String EXPORT_LOGS__MSG__SPECIFY_ONE_OF_OPTION =

http://git-wip-us.apache.org/repos/asf/geode/blob/f9fa3e35/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties b/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
index 8cc8aba..3c56def 100644
--- a/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
+++ b/geode-core/src/test/resources/org/apache/geode/management/internal/cli/commands/golden-help-offline.properties
@@ -1473,8 +1473,9 @@ SYNTAX\n\
 \ \ \ \ [--end-time=value] [--logs-only(=value)?] [--stats-only(=value)?]\n\
 PARAMETERS\n\
 \ \ \ \ dir\n\
-\ \ \ \ \ \ \ \ Local directory to which log files will be written. This is only used when you are\n\
-\ \ \ \ \ \ \ \ exporting logs using http connection. If not specified, user.dir will be used.\n\
+\ \ \ \ \ \ \ \ Local directory to which logs will be written. This is used only when you are exporting\n\
+\ \ \ \ \ \ \ \ logs using an http connection. If not specified, logs are written to the location specified\n\
+\ \ \ \ \ \ \ \ by the user.dir system property.\n\
 \ \ \ \ \ \ \ \ Required: false\n\
 \ \ \ \ group\n\
 \ \ \ \ \ \ \ \ Group of members whose log files will be exported.\n\
@@ -1492,7 +1493,8 @@ PARAMETERS\n\
 \ \ \ \ \ \ \ \ Required: false\n\
 \ \ \ \ \ \ \ \ Default (if the parameter is not specified): false\n\
 \ \ \ \ merge-log\n\
-\ \ \ \ \ \ \ \ Whether to merge logs after exporting to the target directory.\n\
+\ \ \ \ \ \ \ \ Whether to merge logs after exporting to the target directory. Deprecated: Since Geode 1.2,\n\
+\ \ \ \ \ \ \ \ no longer used.\n\
 \ \ \ \ \ \ \ \ Required: false\n\
 \ \ \ \ \ \ \ \ Default (if the parameter is not specified): false\n\
 \ \ \ \ start-time\n\


[05/10] geode git commit: GEODE-2522: Added missing teardown method in test

Posted by kl...@apache.org.
GEODE-2522: Added missing teardown method in test


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/057f60be
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/057f60be
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/057f60be

Branch: refs/heads/feature/GEODE-2645
Commit: 057f60be137992085f2044354a6387c9fe752059
Parents: 89847dd
Author: Jason Huynh <hu...@gmail.com>
Authored: Tue Mar 14 11:23:44 2017 -0700
Committer: Jason Huynh <hu...@gmail.com>
Committed: Tue Mar 14 11:47:55 2017 -0700

----------------------------------------------------------------------
 .../geode/cache/lucene/RebalanceWithRedundancyDUnitTest.java  | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/057f60be/geode-lucene/src/test/java/org/apache/geode/cache/lucene/RebalanceWithRedundancyDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/RebalanceWithRedundancyDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/RebalanceWithRedundancyDUnitTest.java
index 7406162..f866e51 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/RebalanceWithRedundancyDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/RebalanceWithRedundancyDUnitTest.java
@@ -36,6 +36,7 @@ import org.apache.geode.test.junit.categories.DistributedTest;
 
 import org.awaitility.Awaitility;
 
+import org.junit.After;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -47,6 +48,12 @@ import junitparams.Parameters;
 @RunWith(JUnitParamsRunner.class)
 public class RebalanceWithRedundancyDUnitTest extends LuceneQueriesAccessorBase {
 
+  @After
+  public void cleanupRebalanceCallback() {
+    removeCallback(dataStore1);
+    removeCallback(dataStore2);
+  }
+
   @Override
   protected RegionTestableType[] getListOfRegionTestTypes() {
     return new RegionTestableType[] {RegionTestableType.PARTITION_REDUNDANT,


[02/10] geode git commit: GEODE-2267: enhance error output for gfsh.

Posted by kl...@apache.org.
GEODE-2267: enhance error output for gfsh.

* correctly output error message if gfsh execution has an error
* export logs should output correct log message over http connection as well


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/55a2a3ce
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/55a2a3ce
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/55a2a3ce

Branch: refs/heads/feature/GEODE-2645
Commit: 55a2a3ce0b3c30f7386096d0ff3a8c4eae0c08b3
Parents: f4701a1
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Thu Mar 9 22:33:13 2017 -0800
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Mon Mar 13 16:40:18 2017 -0700

----------------------------------------------------------------------
 .../internal/cli/commands/ExportLogCommand.java |   9 +-
 .../cli/shell/GfshExecutionStrategy.java        |   6 +-
 .../web/controllers/ExportLogController.java    |  44 +++++-
 .../web/shell/AbstractHttpOperationInvoker.java |  19 ++-
 .../cli/commands/ExportLogsIntegrationTest.java |  71 +++++++++
 .../cli/commands/ExportLogsStatsDUnitTest.java  | 150 ++++++++++++++++++
 .../cli/commands/ExportStatsDUnitTest.java      | 153 -------------------
 .../controllers/ExportLogControllerTest.java    |  56 +++++++
 .../dunit/rules/GfshShellConnectionRule.java    |   4 +-
 .../commands/ExportLogsOverHttpDUnitTest.java   |  71 ---------
 .../ExportLogsOverHttpIntegrationTest.java      |  30 ++++
 .../ExportLogsStatsOverHttpDUnitTest.java       |  71 +++++++++
 12 files changed, 440 insertions(+), 244 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogCommand.java
index 36d071c..678fdaf 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogCommand.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ExportLogCommand.java
@@ -98,6 +98,10 @@ public class ExportLogCommand implements CommandMarker {
       Set<DistributedMember> targetMembers =
           CliUtil.findMembersIncludingLocators(groups, memberIds);
 
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       Map<String, Path> zipFilesFromMembers = new HashMap<>();
       for (DistributedMember server : targetMembers) {
         Region region = ExportLogsFunction.createOrGetExistingExportLogsRegion(true, cache);
@@ -120,8 +124,11 @@ public class ExportLogCommand implements CommandMarker {
         }
       }
 
-      Path tempDir = Files.createTempDirectory("exportedLogs");
+      if (zipFilesFromMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult("No files to be exported.");
+      }
 
+      Path tempDir = Files.createTempDirectory("exportedLogs");
       // make sure the directory is created, so that even if there is no files unzipped to this dir,
       // we can
       // still zip it and send an empty zip file back to the client

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/GfshExecutionStrategy.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/GfshExecutionStrategy.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/GfshExecutionStrategy.java
index ad78efd..d74f5d6 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/GfshExecutionStrategy.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/shell/GfshExecutionStrategy.java
@@ -284,12 +284,10 @@ public class GfshExecutionStrategy implements ExecutionStrategy {
       if (postExecResult != null) {
         if (Status.ERROR.equals(postExecResult.getStatus())) {
           if (logWrapper.infoEnabled()) {
-            logWrapper
-                .info("Post execution Result :: " + ResultBuilder.resultAsString(postExecResult));
+            logWrapper.info("Post execution Result :: " + postExecResult);
           }
         } else if (logWrapper.fineEnabled()) {
-          logWrapper
-              .fine("Post execution Result :: " + ResultBuilder.resultAsString(postExecResult));
+          logWrapper.fine("Post execution Result :: " + postExecResult);
         }
         commandResult = postExecResult;
       }

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/main/java/org/apache/geode/management/internal/web/controllers/ExportLogController.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/web/controllers/ExportLogController.java b/geode-core/src/main/java/org/apache/geode/management/internal/web/controllers/ExportLogController.java
index 0351573..527e059 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/web/controllers/ExportLogController.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/web/controllers/ExportLogController.java
@@ -16,13 +16,16 @@
 package org.apache.geode.management.internal.web.controllers;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
 import org.apache.geode.internal.lang.StringUtils;
+import org.apache.geode.management.cli.Result;
 import org.apache.geode.management.internal.cli.i18n.CliStrings;
 import org.apache.geode.management.internal.cli.result.ResultBuilder;
 import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
 import org.springframework.core.io.InputStreamResource;
 import org.springframework.http.HttpHeaders;
 import org.springframework.http.HttpStatus;
+import org.springframework.http.MediaType;
 import org.springframework.http.ResponseEntity;
 import org.springframework.stereotype.Controller;
 import org.springframework.web.bind.annotation.RequestMapping;
@@ -85,21 +88,50 @@ public class ExportLogController extends AbstractCommandsController {
       command.addOption(CliStrings.EXPORT_LOGS__ENDTIME, endTime);
     }
 
-    // the result is json string from CommandResult
     String result = processCommand(command.toString());
+    return getResponse(result);
 
-    // parse the result to get the file path. This file Path should always exist in the file system
-    String filePath = ResultBuilder.fromJson(result).nextLine().trim();
+  }
+
+  ResponseEntity<InputStreamResource> getResponse(String result) {
+    // the result is a json string from CommandResult
+    Result commandResult = ResultBuilder.fromJson(result);
+    if (commandResult.getStatus().equals(Result.Status.OK)) {
+      return getOKResponse(commandResult);
+
+    } else {
+      return getErrorResponse(result);
+    }
+  }
+
+  private ResponseEntity<InputStreamResource> getErrorResponse(String result) {
+    HttpHeaders respHeaders = new HttpHeaders();
+    // the command failed, so send the original result back so that the
+    // receiver will know to turn it into a Result object
+    InputStreamResource isr;
+    try {
+      isr = new InputStreamResource(IOUtils.toInputStream(result, "UTF-8"));
+      respHeaders.set(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE);
+      return new ResponseEntity<InputStreamResource>(isr, respHeaders, HttpStatus.OK);
+    } catch (Exception e) {
+      throw new RuntimeException("IO Error writing file to output stream", e);
+    }
+  }
 
+  private ResponseEntity<InputStreamResource> getOKResponse(Result commandResult) {
     HttpHeaders respHeaders = new HttpHeaders();
+    InputStreamResource isr; // the command succeeded, so the output is the filepath
+    String filePath = commandResult.nextLine().trim();
     File zipFile = new File(filePath);
     try {
-      InputStreamResource isr = new InputStreamResource(new FileInputStream(zipFile));
+      isr = new InputStreamResource(new FileInputStream(zipFile));
+      respHeaders.set(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_OCTET_STREAM_VALUE);
       return new ResponseEntity<InputStreamResource>(isr, respHeaders, HttpStatus.OK);
-    } catch (Exception ex) {
-      throw new RuntimeException("IOError writing file to output stream", ex);
+    } catch (Exception e) {
+      throw new RuntimeException("IO Error writing file to output stream", e);
     } finally {
       FileUtils.deleteQuietly(zipFile);
     }
   }
+
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/main/java/org/apache/geode/management/internal/web/shell/AbstractHttpOperationInvoker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/web/shell/AbstractHttpOperationInvoker.java b/geode-core/src/main/java/org/apache/geode/management/internal/web/shell/AbstractHttpOperationInvoker.java
index f60aabc..25f972e 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/web/shell/AbstractHttpOperationInvoker.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/web/shell/AbstractHttpOperationInvoker.java
@@ -633,7 +633,7 @@ public abstract class AbstractHttpOperationInvoker implements HttpOperationInvok
     return response.getBody();
   }
 
-  protected Path downloadResponseToTempFile(ClientHttpRequest request,
+  protected Object downloadResponseToTempFile(ClientHttpRequest request,
       Map<String, ?> uriVariables) {
     final URI url = request.getURL(uriVariables);
 
@@ -645,13 +645,18 @@ public abstract class AbstractHttpOperationInvoker implements HttpOperationInvok
     };
 
     // Streams the response instead of loading it all in memory
-    ResponseExtractor<Path> responseExtractor = resp -> {
-      Path tempFile = Files.createTempFile("fileDownload", "");
-      if (tempFile.toFile().exists()) {
-        FileUtils.deleteQuietly(tempFile.toFile());
+    ResponseExtractor<Object> responseExtractor = resp -> {
+      MediaType mediaType = resp.getHeaders().getContentType();
+      if (mediaType.equals(MediaType.APPLICATION_JSON)) {
+        return org.apache.commons.io.IOUtils.toString(resp.getBody(), "UTF-8");
+      } else {
+        Path tempFile = Files.createTempFile("fileDownload", "");
+        if (tempFile.toFile().exists()) {
+          FileUtils.deleteQuietly(tempFile.toFile());
+        }
+        Files.copy(resp.getBody(), tempFile);
+        return tempFile;
       }
-      Files.copy(resp.getBody(), tempFile);
-      return tempFile;
     };
     return getRestTemplate().execute(url, org.springframework.http.HttpMethod.GET, requestCallback,
         responseExtractor);
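
With this change downloadResponseToTempFile returns either a Path (a zip streamed to a temp file) or a String (the JSON error payload), so a caller must branch on the runtime type. A sketch of that handling; the wrapper class is hypothetical, only the two return shapes come from the patch above:

import java.nio.file.Path;

// Sketch of caller-side handling for the dual return type (the wrapper class
// is hypothetical; only the two return shapes come from the patch above).
class DownloadResultHandler {
  String toUserMessage(Object response) {
    if (response instanceof Path) {
      // application/octet-stream: the zip was streamed to a temp file.
      return "Logs exported to: " + ((Path) response).toAbsolutePath();
    }
    // application/json: the original command result, which the shell can
    // rebuild (e.g. via ResultBuilder.fromJson) to show the real failure message.
    return (String) response;
  }
}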

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsIntegrationTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsIntegrationTest.java
new file mode 100644
index 0000000..46a07ad
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsIntegrationTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.management.internal.cli.commands;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.geode.distributed.ConfigurationProperties;
+import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.test.dunit.rules.GfshShellConnectionRule;
+import org.apache.geode.test.dunit.rules.LocatorStarterRule;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.Properties;
+
+@Category(IntegrationTest.class)
+public class ExportLogsIntegrationTest {
+
+  @ClassRule
+  public static LocatorStarterRule locator = new LocatorStarterRule();
+
+  @Rule
+  public GfshShellConnectionRule gfsh = new GfshShellConnectionRule();
+
+  private static int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+  protected static int httpPort = ports[0];
+  protected static int jmxPort = ports[1];
+
+  @BeforeClass
+  public static void before() throws Exception {
+    Properties properties = new Properties();
+    properties.setProperty(ConfigurationProperties.HTTP_SERVICE_PORT, httpPort + "");
+    properties.setProperty(ConfigurationProperties.JMX_MANAGER_PORT, jmxPort + "");
+    locator.startLocator(properties);
+  }
+
+  protected void connect() throws Exception {
+    gfsh.connectAndVerify(locator);
+  }
+
+  @Test
+  public void testInvalidMember() throws Exception {
+    connect();
+    gfsh.executeCommand("export logs --member=member1,member2");
+    assertThat(gfsh.getGfshOutput()).contains("No Members Found");
+  }
+
+  @Test
+  public void testNothingToExport() throws Exception {
+    connect();
+    gfsh.executeCommand("export logs --stats-only");
+    assertThat(gfsh.getGfshOutput()).contains("No files to be exported.");
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsDUnitTest.java
new file mode 100644
index 0000000..de2ea64
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsDUnitTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.management.internal.cli.commands;
+
+import static org.apache.geode.distributed.ConfigurationProperties.HTTP_SERVICE_BIND_ADDRESS;
+import static org.apache.geode.distributed.ConfigurationProperties.HTTP_SERVICE_PORT;
+import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER_PORT;
+import static org.apache.geode.distributed.ConfigurationProperties.STATISTIC_ARCHIVE_FILE;
+import static org.apache.geode.management.internal.cli.commands.ExportLogCommand.ONLY_DATE_FORMAT;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.google.common.collect.Sets;
+
+import org.apache.geode.distributed.ConfigurationProperties;
+import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
+import org.apache.geode.test.dunit.rules.GfshShellConnectionRule;
+import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+@Category(DistributedTest.class)
+public class ExportLogsStatsDUnitTest {
+  @ClassRule
+  public static LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
+
+  @ClassRule
+  public static GfshShellConnectionRule connector = new GfshShellConnectionRule();
+
+  protected static int jmxPort, httpPort;
+  protected static Set<String> expectedZipEntries = new HashSet<>();
+  protected static MemberVM locator;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
+    httpPort = ports[0];
+    jmxPort = ports[1];
+    Properties locatorProperties = new Properties();
+    locatorProperties.setProperty(HTTP_SERVICE_BIND_ADDRESS, "localhost");
+    locatorProperties.setProperty(HTTP_SERVICE_PORT, httpPort + "");
+    locatorProperties.setProperty(JMX_MANAGER_PORT, jmxPort + "");
+
+    // start the locator in vm0 and then connect to it over http
+    locator = lsRule.startLocatorVM(0, locatorProperties);
+
+    Properties serverProperties = new Properties();
+    serverProperties.setProperty(ConfigurationProperties.STATISTIC_SAMPLING_ENABLED, "true");
+    serverProperties.setProperty(STATISTIC_ARCHIVE_FILE, "statistics.gfs");
+    lsRule.startServerVM(1, serverProperties, locator.getPort());
+
+    expectedZipEntries = Sets.newHashSet("locator-0/locator-0.log", "server-1/server-1.log",
+        "server-1/statistics.gfs");
+  }
+
+  protected void connectIfNeeded() throws Exception {
+    if (!connector.isConnected()) {
+      connector.connect(locator);
+    }
+  }
+
+  @Test
+  public void testExportLogsAndStats() throws Exception {
+    connectIfNeeded();
+    connector.executeAndVerifyCommand("export logs");
+    String zipPath = getZipPathFromCommandResult(connector.getGfshOutput());
+    Set<String> actualZipEnries = getZipEntries(zipPath);
+
+    Set<String> expectedFiles = Sets.newHashSet("locator-0/locator-0.log", "server-1/server-1.log",
+        "server-1/statistics.gfs");
+    assertThat(actualZipEnries).isEqualTo(expectedFiles);
+  }
+
+  @Test
+  public void testExportLogsOnly() throws Exception {
+    connectIfNeeded();
+    connector.executeAndVerifyCommand("export logs --logs-only");
+    String zipPath = getZipPathFromCommandResult(connector.getGfshOutput());
+    Set<String> actualZipEnries = getZipEntries(zipPath);
+
+    Set<String> expectedFiles = Sets.newHashSet("locator-0/locator-0.log", "server-1/server-1.log");
+    assertThat(actualZipEnries).isEqualTo(expectedFiles);
+  }
+
+  @Test
+  public void testExportStatsOnly() throws Exception {
+    connectIfNeeded();
+    connector.executeAndVerifyCommand("export logs --stats-only");
+    String zipPath = getZipPathFromCommandResult(connector.getGfshOutput());
+    Set<String> actualZipEnries = getZipEntries(zipPath);
+
+    Set<String> expectedFiles = Sets.newHashSet("server-1/statistics.gfs");
+    assertThat(actualZipEnries).isEqualTo(expectedFiles);
+  }
+
+  @Test
+  public void startAndEndDateCanExcludeLogs() throws Exception {
+    connectIfNeeded();
+    ZonedDateTime now = LocalDateTime.now().atZone(ZoneId.systemDefault());
+    ZonedDateTime yesterday = now.minusDays(1);
+    ZonedDateTime twoDaysAgo = now.minusDays(2);
+
+    DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern(ONLY_DATE_FORMAT);
+
+    CommandStringBuilder commandStringBuilder = new CommandStringBuilder("export logs");
+    commandStringBuilder.addOption("start-time", dateTimeFormatter.format(twoDaysAgo));
+    commandStringBuilder.addOption("end-time", dateTimeFormatter.format(yesterday));
+    commandStringBuilder.addOption("log-level", "debug");
+
+    String output = connector.execute(commandStringBuilder.toString());
+    assertThat(output).contains("No files to be exported");
+  }
+
+  protected String getZipPathFromCommandResult(String message) {
+    return message.replaceAll("Logs exported to the connected member's file system: ", "").trim();
+  }
+
+  private static Set<String> getZipEntries(String zipFilePath) throws IOException {
+    return new ZipFile(zipFilePath).stream().map(ZipEntry::getName).collect(Collectors.toSet());
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportStatsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportStatsDUnitTest.java
deleted file mode 100644
index b7f8c2a..0000000
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ExportStatsDUnitTest.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.management.internal.cli.commands;
-
-import static org.apache.geode.distributed.ConfigurationProperties.HTTP_SERVICE_BIND_ADDRESS;
-import static org.apache.geode.distributed.ConfigurationProperties.HTTP_SERVICE_PORT;
-import static org.apache.geode.distributed.ConfigurationProperties.JMX_MANAGER_PORT;
-import static org.apache.geode.distributed.ConfigurationProperties.STATISTIC_ARCHIVE_FILE;
-import static org.apache.geode.management.internal.cli.commands.ExportLogCommand.ONLY_DATE_FORMAT;
-import static org.assertj.core.api.Assertions.assertThat;
-
-import com.google.common.collect.Sets;
-
-import org.apache.geode.distributed.ConfigurationProperties;
-import org.apache.geode.internal.AvailablePortHelper;
-import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
-import org.apache.geode.test.dunit.rules.GfshShellConnectionRule;
-import org.apache.geode.test.dunit.rules.LocatorServerStartupRule;
-import org.apache.geode.test.dunit.rules.MemberVM;
-import org.apache.geode.test.junit.categories.DistributedTest;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.io.IOException;
-import java.time.LocalDateTime;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
-import java.util.HashSet;
-import java.util.Properties;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
-
-@Category(DistributedTest.class)
-public class ExportStatsDUnitTest {
-  @ClassRule
-  public static LocatorServerStartupRule lsRule = new LocatorServerStartupRule();
-
-  @ClassRule
-  public static GfshShellConnectionRule connector = new GfshShellConnectionRule();
-
-  protected static int jmxPort, httpPort;
-  protected static Set<String> expectedZipEntries = new HashSet<>();
-  protected static MemberVM locator;
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    httpPort = ports[0];
-    jmxPort = ports[1];
-    Properties locatorProperties = new Properties();
-    locatorProperties.setProperty(HTTP_SERVICE_BIND_ADDRESS, "localhost");
-    locatorProperties.setProperty(HTTP_SERVICE_PORT, httpPort + "");
-    locatorProperties.setProperty(JMX_MANAGER_PORT, jmxPort + "");
-
-    // start the locator in vm0 and then connect to it over http
-    locator = lsRule.startLocatorVM(0, locatorProperties);
-
-    Properties serverProperties = new Properties();
-    serverProperties.setProperty(ConfigurationProperties.STATISTIC_SAMPLING_ENABLED, "true");
-    serverProperties.setProperty(STATISTIC_ARCHIVE_FILE, "statistics.gfs");
-    lsRule.startServerVM(1, serverProperties, locator.getPort());
-
-    expectedZipEntries = Sets.newHashSet("locator-0/locator-0.log", "server-1/server-1.log",
-        "server-1/statistics.gfs");
-  }
-
-  protected void connectIfNeeded() throws Exception {
-    if (!connector.isConnected()) {
-      connector.connect(locator);
-    }
-  }
-
-  @Test
-  public void testExportLogsAndStats() throws Exception {
-    connectIfNeeded();
-    connector.executeAndVerifyCommand("export logs");
-    String zipPath = getZipPathFromCommandResult(connector.getGfshOutput());
-    Set<String> actualZipEnries = getZipEntries(zipPath);
-
-    Set<String> expectedFiles = Sets.newHashSet("locator-0/locator-0.log", "server-1/server-1.log",
-        "server-1/statistics.gfs");
-    assertThat(actualZipEnries).isEqualTo(expectedFiles);
-  }
-
-  @Test
-  public void testExportLogsOnly() throws Exception {
-    connectIfNeeded();
-    connector.executeAndVerifyCommand("export logs --logs-only");
-    String zipPath = getZipPathFromCommandResult(connector.getGfshOutput());
-    Set<String> actualZipEnries = getZipEntries(zipPath);
-
-    Set<String> expectedFiles = Sets.newHashSet("locator-0/locator-0.log", "server-1/server-1.log");
-    assertThat(actualZipEnries).isEqualTo(expectedFiles);
-  }
-
-  @Test
-  public void testExportStatsOnly() throws Exception {
-    connectIfNeeded();
-    connector.executeAndVerifyCommand("export logs --stats-only");
-    String zipPath = getZipPathFromCommandResult(connector.getGfshOutput());
-    Set<String> actualZipEnries = getZipEntries(zipPath);
-
-    Set<String> expectedFiles = Sets.newHashSet("server-1/statistics.gfs");
-    assertThat(actualZipEnries).isEqualTo(expectedFiles);
-  }
-
-  @Test
-  public void startAndEndDateCanExcludeLogs() throws Exception {
-    connectIfNeeded();
-    ZonedDateTime now = LocalDateTime.now().atZone(ZoneId.systemDefault());
-    ZonedDateTime yesterday = now.minusDays(1);
-    ZonedDateTime twoDaysAgo = now.minusDays(2);
-
-    DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern(ONLY_DATE_FORMAT);
-
-    CommandStringBuilder commandStringBuilder = new CommandStringBuilder("export logs");
-    commandStringBuilder.addOption("start-time", dateTimeFormatter.format(twoDaysAgo));
-    commandStringBuilder.addOption("end-time", dateTimeFormatter.format(yesterday));
-    commandStringBuilder.addOption("log-level", "debug");
-
-    connector.executeAndVerifyCommand(commandStringBuilder.toString());
-    String zipPath = getZipPathFromCommandResult(connector.getGfshOutput());
-
-    Set<String> actualZipEnries = getZipEntries(zipPath);
-    assertThat(actualZipEnries).hasSize(0);
-  }
-
-  protected String getZipPathFromCommandResult(String message) {
-    return message.replaceAll("Logs exported to the connected member's file system: ", "").trim();
-  }
-
-  private static Set<String> getZipEntries(String zipFilePath) throws IOException {
-    return new ZipFile(zipFilePath).stream().map(ZipEntry::getName).collect(Collectors.toSet());
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/test/java/org/apache/geode/management/internal/web/controllers/ExportLogControllerTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/web/controllers/ExportLogControllerTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/web/controllers/ExportLogControllerTest.java
new file mode 100644
index 0000000..bee7db2
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/web/controllers/ExportLogControllerTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.management.internal.web.controllers;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.geode.management.internal.cli.CommandResponseBuilder;
+import org.apache.geode.management.internal.cli.result.CommandResult;
+import org.apache.geode.management.internal.cli.result.ResultBuilder;
+import org.apache.geode.test.junit.categories.UnitTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.springframework.core.io.InputStreamResource;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.MediaType;
+import org.springframework.http.ResponseEntity;
+
+@Category(UnitTest.class)
+public class ExportLogControllerTest {
+  private ExportLogController controller;
+
+  @Before
+  public void before() throws Exception {
+    controller = new ExportLogController();
+  }
+
+  @Test
+  public void testErrorResponse() throws Exception {
+    String message = "No Members Found";
+    CommandResult result = (CommandResult) ResultBuilder.createUserErrorResult(message);
+    String responseJson = CommandResponseBuilder.createCommandResponseJson("memberName", result);
+
+    ResponseEntity<InputStreamResource> resp = controller.getResponse(responseJson);
+    HttpHeaders headers = resp.getHeaders();
+    assertThat(headers.get(HttpHeaders.CONTENT_TYPE).get(0))
+        .isEqualTo(MediaType.APPLICATION_JSON_VALUE);
+
+    InputStreamResource body = resp.getBody();
+    assertThat(IOUtils.toString(body.getInputStream(), "utf-8")).isEqualTo(responseJson);
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-core/src/test/java/org/apache/geode/test/dunit/rules/GfshShellConnectionRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/GfshShellConnectionRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/GfshShellConnectionRule.java
index f367458..3b1c99a 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/GfshShellConnectionRule.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/GfshShellConnectionRule.java
@@ -86,11 +86,11 @@ public class GfshShellConnectionRule extends DescribedExternalResource {
 
   }
 
-  public void connect(MemberVM locator, String... options) throws Exception {
+  public void connect(Member locator, String... options) throws Exception {
     connect(locator.getPort(), PortType.locator, options);
   }
 
-  public void connectAndVerify(MemberVM locator, String... options) throws Exception {
+  public void connectAndVerify(Member locator, String... options) throws Exception {
     connect(locator.getPort(), PortType.locator, options);
     assertThat(this.connected).isTrue();
   }
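
Widening connect from MemberVM to Member is what lets the same rule serve both the DUnit tests and the new in-process integration tests in this patch. A sketch mirroring the usage already shown in ExportLogsIntegrationTest; it assumes, as that test does, that the locator is started in a @BeforeClass method before connecting:

import org.apache.geode.test.dunit.rules.GfshShellConnectionRule;
import org.apache.geode.test.dunit.rules.LocatorStarterRule;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;

// Sketch mirroring usage already in this patch: LocatorStarterRule's locator is
// a plain Member rather than a MemberVM, so the widened connect(Member, ...)
// signature lets one rule serve both test styles.
public class GfshConnectSketchTest {
  @ClassRule
  public static LocatorStarterRule locator = new LocatorStarterRule();

  @Rule
  public GfshShellConnectionRule gfsh = new GfshShellConnectionRule();

  @Test
  public void connectsThroughAnyMember() throws Exception {
    // assumes locator.startLocator(...) ran in @BeforeClass, as in the patch's test
    gfsh.connectAndVerify(locator); // accepts any Member, not just a MemberVM
    gfsh.executeAndVerifyCommand("export logs --logs-only");
  }
}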

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpDUnitTest.java b/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpDUnitTest.java
deleted file mode 100644
index cc4ae28..0000000
--- a/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpDUnitTest.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.management.internal.cli.commands;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import org.apache.geode.test.dunit.rules.GfshShellConnectionRule;
-import org.apache.geode.test.junit.categories.DistributedTest;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
-
-@Category(DistributedTest.class)
-public class ExportLogsOverHttpDUnitTest extends ExportStatsDUnitTest {
-
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  @Override
-  public void connectIfNeeded() throws Exception {
-    if (!connector.isConnected())
-      connector.connect(httpPort, GfshShellConnectionRule.PortType.http);
-  }
-
-  @Test
-  public void testExportWithDir() throws Exception {
-    connectIfNeeded();
-    File dir = temporaryFolder.newFolder();
-    // export the logs
-    connector.executeCommand("export logs --dir=" + dir.getAbsolutePath());
-    // verify that the message contains a path to the user.dir
-    String message = connector.getGfshOutput();
-    assertThat(message).contains("Logs exported to: ");
-    assertThat(message).contains(dir.getAbsolutePath());
-
-    String zipPath = getZipPathFromCommandResult(message);
-    Set<String> actualZipEntries =
-        new ZipFile(zipPath).stream().map(ZipEntry::getName).collect(Collectors.toSet());
-
-    assertThat(actualZipEntries).isEqualTo(expectedZipEntries);
-
-    // also verify that the zip file on locator is deleted
-    assertThat(Arrays.stream(locator.getWorkingDir().listFiles())
-        .filter(file -> file.getName().endsWith(".zip")).collect(Collectors.toSet())).isEmpty();
-  }
-
-  protected String getZipPathFromCommandResult(String message) {
-    return message.replaceAll("Logs exported to: ", "").trim();
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpIntegrationTest.java b/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpIntegrationTest.java
new file mode 100644
index 0000000..420f2dd
--- /dev/null
+++ b/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsOverHttpIntegrationTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.management.internal.cli.commands;
+
+import org.apache.geode.test.dunit.rules.GfshShellConnectionRule;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+import org.junit.experimental.categories.Category;
+
+@Category(IntegrationTest.class)
+public class ExportLogsOverHttpIntegrationTest extends ExportLogsIntegrationTest {
+
+  @Override
+  protected void connect() throws Exception {
+    gfsh.connectAndVerify(httpPort, GfshShellConnectionRule.PortType.http);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/55a2a3ce/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsOverHttpDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsOverHttpDUnitTest.java b/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsOverHttpDUnitTest.java
new file mode 100644
index 0000000..c0b08cf
--- /dev/null
+++ b/geode-web/src/test/java/org/apache/geode/management/internal/cli/commands/ExportLogsStatsOverHttpDUnitTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.management.internal.cli.commands;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.geode.test.dunit.rules.GfshShellConnectionRule;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+@Category(DistributedTest.class)
+public class ExportLogsStatsOverHttpDUnitTest extends ExportLogsStatsDUnitTest {
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @Override
+  public void connectIfNeeded() throws Exception {
+    if (!connector.isConnected())
+      connector.connect(httpPort, GfshShellConnectionRule.PortType.http);
+  }
+
+  @Test
+  public void testExportWithDir() throws Exception {
+    connectIfNeeded();
+    File dir = temporaryFolder.newFolder();
+    // export the logs
+    connector.executeCommand("export logs --dir=" + dir.getAbsolutePath());
+    // verify that the message contains a path to the user.dir
+    String message = connector.getGfshOutput();
+    assertThat(message).contains("Logs exported to: ");
+    assertThat(message).contains(dir.getAbsolutePath());
+
+    String zipPath = getZipPathFromCommandResult(message);
+    Set<String> actualZipEntries =
+        new ZipFile(zipPath).stream().map(ZipEntry::getName).collect(Collectors.toSet());
+
+    assertThat(actualZipEntries).isEqualTo(expectedZipEntries);
+
+    // also verify that the zip file on locator is deleted
+    assertThat(Arrays.stream(locator.getWorkingDir().listFiles())
+        .filter(file -> file.getName().endsWith(".zip")).collect(Collectors.toSet())).isEmpty();
+  }
+
+  protected String getZipPathFromCommandResult(String message) {
+    return message.replaceAll("Logs exported to: ", "").trim();
+  }
+}


[09/10] geode git commit: GEODE-2379 Document new behavior of export logs This closes #425

Posted by kl...@apache.org.
GEODE-2379 Document new behavior of export logs
This closes #425


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/f329f4a5
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/f329f4a5
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/f329f4a5

Branch: refs/heads/feature/GEODE-2645
Commit: f329f4a54c2bdfa254f2a9a9be1f535a9899cd85
Parents: 6b4e4f2
Author: Dave Barnes <db...@pivotal.io>
Authored: Tue Mar 14 12:48:52 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Wed Mar 15 10:35:42 2017 -0700

----------------------------------------------------------------------
 .../gfsh/command-pages/export.html.md.erb       | 46 ++++++++------------
 1 file changed, 19 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/f329f4a5/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
index 690bf07..d8dbb1b 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
@@ -23,27 +23,27 @@ limitations under the License.
 
 Export configurations, data, logs and stack-traces.
 
--   **[export cluster-configuration](../../../tools_modules/gfsh/command-pages/export.html#topic_mdv_jgz_ck)**
+-   **[export cluster-configuration](#topic_mdv_jgz_ck)**
 
     Exports a cluster configuration zip file that contains the `cache.xml` files, `gemfire.properties` files, and application jar files needed to configure and operate a Geode distributed system.
 
--   **[export config](../../../tools_modules/gfsh/command-pages/export.html#topic_C7C69306F93743459E65D46537F4A1EE)**
+-   **[export config](#topic_C7C69306F93743459E65D46537F4A1EE)**
 
     Export configuration properties for a member or members.
 
--   **[export data](../../../tools_modules/gfsh/command-pages/export.html#topic_263B70069BFC4A7185F86B3272011734)**
+-   **[export data](#topic_263B70069BFC4A7185F86B3272011734)**
 
     Export user data from a region to a file.
 
--   **[export logs](../../../tools_modules/gfsh/command-pages/export.html#topic_B80978CC659244AE91E2B8CE56EBDFE3)**
+-   **[export logs](#topic_B80978CC659244AE91E2B8CE56EBDFE3)**
 
     Export logs to a given directory.
 
--   **[export offline-disk-store](../../../tools_modules/gfsh/command-pages/export.html#topic_sjg_bvt_gq)**
+-   **[export offline-disk-store](#topic_sjg_bvt_gq)**
 
     Export region data from an offline disk store into gemfire snapshot files.
 
--   **[export stack-traces](../../../tools_modules/gfsh/command-pages/export.html#topic_195D27B8B2B64A4E84CF2256636D54BD)**
+-   **[export stack-traces](#topic_195D27B8B2B64A4E84CF2256636D54BD)**
 
     Export the stack trace for a member or members.
 
@@ -61,8 +61,6 @@ See [Overview of the Cluster Configuration Service](../../../configuring/cluster
 export cluster-configuration --zip-file-name=value [--dir=value]
 ```
 
-<a id="topic_mdv_jgz_ck__table_qyw_hbh_2w"></a>
-
 | Name                                                  | Description                                                                          | Default Value     |
 |-------------------------------------------------------|--------------------------------------------------------------------------------------|-------------------|
 | <span class="keyword parmname">\\-\\-zip-file-name</span> | *Required.* File name of the zip file to contain the exported cluster configuration. | �                 |
@@ -98,8 +96,6 @@ export config [--member=value(,value)*] [--group=value(,value)*]
 [--dir=value]
 ```
 
-<a id="topic_C7C69306F93743459E65D46537F4A1EE__table_u5w_2bh_2w"></a>
-
 | Name                                           | Description                                                          |
 |------------------------------------------------|----------------------------------------------------------------------|
 | <span class="keyword parmname">\\-\\-member</span> | Name/Id of the member(s) whose configuration will be exported.       |
@@ -135,8 +131,6 @@ Export user data from a region to a file.
 export data --region=value --file=value --member=value
 ```
 
-<a id="topic_263B70069BFC4A7185F86B3272011734__table_lds_lbh_2w"></a>
-
 | Name                                           | Description                                                                                                                                     |
 |------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------|
 | <span class="keyword parmname">\\-\\-region</span> | *Required.* Region from which data is to be exported.                                                                                           |
@@ -167,28 +161,30 @@ Export logs to a given directory.
 
 All files that have logs in the specified time range will be exported. If no time range is specified, all logs will be exported.
 
+The `--dir` parameter specifies a local directory to which log files will be written. This is used only when you are exporting logs using an http connection. If executed over http, the zip archive will be saved in the specified directory on the user's client machine. If not specified, logs are written to the location specified by the `user.dir` system property. When the command is executed over JMX, logs will be saved as `exportedlogs_xxx.zip` in the connected locator's working directory.
+
 **Availability:** Online. You must be connected in `gfsh` to a JMX Manager member to use this command.
 
 **Syntax:**
 
 ``` pre
-export logs --dir=value [--group=value(,value)*] [--member=value] [--log
--level=value] [--only-log-level=value] [--merge-log=value] [--start-time=value]
-[--end-time=value]
+export logs [--dir=value] [--group=value(,value)*] [--member=value(,value)*] 
+[--log-level=value] [--only-log-level=value] [--merge-log=value] 
+[--start-time=value] [--end-time=value] [--logs-only(=value)?] [--stats-only(=value)?]
 ```
 
-<a id="topic_B80978CC659244AE91E2B8CE56EBDFE3__table_xw5_4bh_2w"></a>
-
 | Name                                                   | Description                                                                                                                | Default Value |
 |--------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|---------------|
-| <span class="keyword parmname">\\-\\-dir</span>            | *Required.* Directory to which log files will be written.                                                                  |               |
+| <span class="keyword parmname">\\-\\-dir</span>            | Local directory to which log files will be written when logs are exported using an HTTP connection. Ignored when the command is executed over JMX.     |               |
 | <span class="keyword parmname">\\-\\-group</span>          | Group of members whose log files will be exported.                                                                         |               |
 | <span class="keyword parmname">&#8209;&#8209;member</span>         | Name/ID of the member(s) whose log files will be exported.                                                                 |               |
-| <span class="keyword parmname">\\-\\-log-level</span>      | Minimum level of log entries to export. Valid values are: `none`, `error`, `info`, `config`, `fine`, `finer` and `finest`. | `info`        |
-| <span class="keyword parmname">\\-\\-only-log-level</span> | Whether to only include those entries that exactly match the <span class="keyword parmname">\\-\\-log-level</span> specified.  | false         |
-| <span class="keyword parmname">&#8209;&#8209;merge&#8209;log</span>      | Whether to merge logs after exporting to the target directory.                                                             | false         |
+| <span class="keyword parmname">\\-\\-log-level</span>      | Minimum level of log entries to export. Valid values are: `fatal`, `error`, `warn`, `info`, `debug`, `trace`, and `all`. | `info`        |
+| <span class="keyword parmname">\\-\\-only-log-level</span> | Whether to only include only entries that exactly match the <span class="keyword parmname">\\-\\-log-level</span> specified.  | false         |
+| <span class="keyword parmname">&#8209;&#8209;merge&#8209;log</span>      | Whether to merge logs after exporting to the target directory (deprecated).                                                             | false         |
 | <span class="keyword parmname">\\-\\-start-time</span>     | Log entries that occurred after this time will be exported. Format: yyyy/MM/dd/HH/mm/ss/SSS/z OR yyyy/MM/dd                | no limit      |
 | <span class="keyword parmname">\\-\\-end-time</span>       | Log entries that occurred before this time will be exported. Format: yyyy/MM/dd/HH/mm/ss/SSS/z OR yyyy/MM/dd               | no limit      |
+| <span class="keyword parmname">\\-\\-logs-only</span>       | Whether to export only logs (not statistics)               | If parameter not specified: false. If parameter specified without a value: true      |
+| <span class="keyword parmname">\\-\\-stats-only</span>       | Whether to export only statistics (not logs)               | If parameter not specified: false. If parameter specified without a value: true      |
 
 <span class="tablecap">Table 4. Export Logs Parameters</span>
 
@@ -201,8 +197,8 @@ export logs --dir=data/logs
 **Sample Output:**
 
 ``` pre
-gfsh>export logs --dir=data/logs
-Successfully exported to directory data/logs
+gfsh>export logs
+Logs exported to the connected member's file system: /data/my-locator/exportedLogs_1489513007261.zip
 ```
 
 ## <a id="topic_sjg_bvt_gq" class="no-quick-link"></a>export offline-disk-store
@@ -217,8 +213,6 @@ Export region data from an offline disk store into gemfire snapshot files.
 export offline-disk-store --name=value --disk-dirs=value(,value)* --dir=value
 ```
 
-<a id="topic_sjg_bvt_gq__table_gzr_rbh_2w"></a>
-
 | Name                                              | Description                                        |
 |---------------------------------------------------|----------------------------------------------------|
 | <span class="keyword parmname">\\-\\-name</span>      | *Required.* Name of the disk store to be exported. |
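
A minimal sketch, assuming a disk store named myDiskStore with disk files under /data/disk1 (both placeholders):

``` pre
gfsh>export offline-disk-store --name=myDiskStore --disk-dirs=/data/disk1 --dir=/home/user/snapshots
```
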
@@ -246,8 +240,6 @@ Export the stack trace for a member or members.
 export stack-traces --file=value [--member=value] [--group=value]
 ```
 
-<a id="topic_195D27B8B2B64A4E84CF2256636D54BD__table_cjr_wbh_2w"></a>
-
 | Name                                           | Description                                                     |
 |------------------------------------------------|-----------------------------------------------------------------|
 | <span class="keyword parmname">\\-\\-file</span>   | *Required.* Filename to which the stack-traces will be written. |
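
A hypothetical invocation, with a placeholder file name:

``` pre
gfsh>export stack-traces --file=stacktraces.txt
```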