Posted to commits@geode.apache.org by kl...@apache.org on 2017/05/31 17:01:06 UTC

[01/32] geode git commit: GEODE-2964: add commons-collections to gfsh dependencies [Forced Update!]

Repository: geode
Updated Branches:
  refs/heads/feature/GEODE-1279 8b731c577 -> 644040372 (forced update)


GEODE-2964: add commons-collections to gfsh dependencies


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/f9099df5
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/f9099df5
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/f9099df5

Branch: refs/heads/feature/GEODE-1279
Commit: f9099df50ae061a158a00f1c3a69327bbf583d1a
Parents: 456ee15
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Tue May 23 14:57:19 2017 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Tue May 23 16:37:24 2017 -0700

----------------------------------------------------------------------
 geode-assembly/build.gradle | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/f9099df5/geode-assembly/build.gradle
----------------------------------------------------------------------
diff --git a/geode-assembly/build.gradle b/geode-assembly/build.gradle
index a4f0c69..c308d30 100755
--- a/geode-assembly/build.gradle
+++ b/geode-assembly/build.gradle
@@ -131,12 +131,13 @@ def cp = {
     .join(' ')
 
   // then add all the dependencies of the dependent jars
-  jars += ' ' + configurations.archives.dependencies.collect { 
+  jars += ' ' + configurations.archives.dependencies.collect {
     it.dependencyProject.findAll { !(it.name.contains('web-api') || it.name.contains('pulse')) }
       .collect { it.configurations.runtime.collect { it.getName() }.findAll {
         // depedencies from geode-core
         it.contains('antlr') ||
         it.contains('commons-io') ||
+        it.contains('commons-collections') ||
         it.contains('commons-lang') ||
         it.contains('commons-logging') ||
         it.contains('fast-classpath-scanner') ||


[23/32] geode git commit: Revert "GEODE-2957: Create Lucene index analyzer help updated to include keyword DEFAULT"

Posted by kl...@apache.org.
Revert "GEODE-2957: Create Lucene index analyzer help updated to include keyword DEFAULT"

This reverts commit 96665fafbcc06948b7152ca9ad7344ab938f27ff.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/5b2cdf8c
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/5b2cdf8c
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/5b2cdf8c

Branch: refs/heads/feature/GEODE-1279
Commit: 5b2cdf8c1499c8fdc1f76ad9351c30876f6623b2
Parents: 96665fa
Author: nabarun <nn...@pivotal.io>
Authored: Tue May 30 12:14:24 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Tue May 30 12:14:24 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/5b2cdf8c/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
index 8104b3f..db9f7b9 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
@@ -43,7 +43,7 @@ public class LuceneCliStrings {
       "Fields on the region values which are stored in the lucene index.\nUse __REGION_VALUE_FIELD if the entire region value should be indexed.\n__REGION_VALUE_FIELD is valid only if the region values are strings or numbers.";
   public static final String LUCENE_CREATE_INDEX__ANALYZER = "analyzer";
   public static final String LUCENE_CREATE_INDEX__ANALYZER_HELP =
-      "Type of the analyzer for each field.\nUse the case sensitive keyword DEFAULT or leave an analyzer blank to use the default standard analyzer.";
+      "Type of the analyzer for each field.";
   public static final String CREATE_INDEX__SUCCESS__MSG =
       "Index successfully created with following details";
   public static final String CREATE_INDEX__FAILURE__MSG =


[03/32] geode git commit: GEODE-1994: Removed two references to ServerLauncher.setMemberName that are guaranteed to throw under the changes introduced by d16d192.

Posted by kl...@apache.org.
GEODE-1994: Removed two references to ServerLauncher.setMemberName that are guaranteed to throw under the changes introduced by d16d192.
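
For context, a minimal sketch of roughly the status call that remains after this change. The pid and working-directory values below are hypothetical stand-ins for the options that LauncherLifecycleCommands resolves from the gfsh command line; the builder methods themselves all appear in the diff that follows.

  import org.apache.geode.distributed.ServerLauncher;
  import org.apache.geode.distributed.ServerLauncher.ServerState;

  public class ServerStatusSketch {
    public static void main(String[] args) {
      int pid = 12345;                                            // hypothetical pid
      String workingDirectory = System.getProperty("user.dir");   // stand-in for --dir

      // Status is resolved from the pid and working directory alone; also calling
      // setMemberName(member) here is what d16d192 made guaranteed to throw.
      ServerLauncher launcher = new ServerLauncher.Builder()
          .setCommand(ServerLauncher.Command.STATUS)
          .setDisableDefaultServer(true) // unknown whether a CacheServer was started
          .setPid(pid)
          .setWorkingDirectory(workingDirectory)
          .build();

      ServerState state = launcher.status();
      System.out.println(state);
    }
  }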


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/dff937f3
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/dff937f3
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/dff937f3

Branch: refs/heads/feature/GEODE-1279
Commit: dff937f32b1cb1cce6269226dc17263484685bf1
Parents: ca12f78
Author: Patrick Rhomberg <pr...@pivotal.io>
Authored: Tue May 23 15:07:11 2017 -0700
Committer: Jared Stewart <js...@pivotal.io>
Committed: Wed May 24 09:37:27 2017 -0700

----------------------------------------------------------------------
 .../cli/commands/LauncherLifecycleCommands.java       | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/dff937f3/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
index b6c11c4..74acfd6 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/LauncherLifecycleCommands.java
@@ -1797,12 +1797,12 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
         final ServerLauncher serverLauncher = new ServerLauncher.Builder()
             .setCommand(ServerLauncher.Command.STATUS).setDebug(isDebugging())
             // NOTE since we do not know whether the "CacheServer" was enabled or not on the GemFire
-            // server when it was started,
-            // set the disableDefaultServer property in the ServerLauncher.Builder to default status
+            // server when it was started, set the disableDefaultServer property in the
+            // ServerLauncher.Builder to default status
             // to the MemberMBean
             // TODO fix this hack! (how, the 'start server' loop needs it)
-            .setDisableDefaultServer(true).setMemberName(member).setPid(pid)
-            .setWorkingDirectory(workingDirectory).build();
+            .setDisableDefaultServer(true).setPid(pid).setWorkingDirectory(workingDirectory)
+            .build();
 
         final ServerState status = serverLauncher.status();
 
@@ -1854,9 +1854,9 @@ public class LauncherLifecycleCommands extends AbstractCommandsSupport {
               .format(CliStrings.STOP_SERVICE__GFSH_NOT_CONNECTED_ERROR_MESSAGE, "Cache Server"));
         }
       } else {
-        final ServerLauncher serverLauncher = new ServerLauncher.Builder()
-            .setCommand(ServerLauncher.Command.STOP).setDebug(isDebugging()).setMemberName(member)
-            .setPid(pid).setWorkingDirectory(workingDirectory).build();
+        final ServerLauncher serverLauncher =
+            new ServerLauncher.Builder().setCommand(ServerLauncher.Command.STOP)
+                .setDebug(isDebugging()).setPid(pid).setWorkingDirectory(workingDirectory).build();
 
         serverState = serverLauncher.status();
         serverLauncher.stop();


[02/32] geode git commit: GEODE-2970: clearing LogWriterAppender when shutting down locator.

Posted by kl...@apache.org.
GEODE-2970: clearing LogWriterAppender when shutting down locator.

* Do not bury the NPE in AlterRuntimeConfigFunction
* destroy the LogWriterAppender when removing the locator
* added test
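
The net effect is condensed in the sketch below, a self-contained restatement of the scenario the new InternalLocatorIntegrationTest (added in this commit's diff) verifies; the log-file path is a hypothetical stand-in for the test's TemporaryFolder file.

  import static org.apache.geode.distributed.ConfigurationProperties.LOG_FILE;
  import static org.apache.geode.distributed.ConfigurationProperties.NAME;

  import java.util.Properties;

  import org.apache.geode.distributed.Locator;
  import org.apache.geode.distributed.internal.InternalLocator;
  import org.apache.geode.internal.AvailablePortHelper;
  import org.apache.geode.internal.logging.log4j.LogWriterAppenders;

  public class LocatorAppenderCleanupSketch {
    public static void main(String[] args) throws Exception {
      Properties props = new Properties();
      props.setProperty(NAME, "testVM");
      props.setProperty(LOG_FILE, "/tmp/testVM.log"); // hypothetical log path

      // 'false' is the no-distributed-system case exercised by the new test.
      int port = AvailablePortHelper.getRandomAvailableTCPPort();
      Locator locator =
          InternalLocator.startLocator(port, null, null, null, null, null, false, props, null);

      // While the locator runs, the MAIN log-writer appender is registered.
      if (LogWriterAppenders.getAppender(LogWriterAppenders.Identifier.MAIN) == null) {
        throw new AssertionError("appender should exist while the locator is running");
      }

      locator.stop();

      // After GEODE-2970, removing the locator stops and destroys the appender.
      if (LogWriterAppenders.getAppender(LogWriterAppenders.Identifier.MAIN) != null) {
        throw new AssertionError("appender should have been destroyed on locator stop");
      }
    }
  }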


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/ca12f781
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/ca12f781
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/ca12f781

Branch: refs/heads/feature/GEODE-1279
Commit: ca12f781c14409ee87873f604be64d98952c0a9a
Parents: f9099df
Author: Jinmei Liao <ji...@pivotal.io>
Authored: Mon May 22 08:47:37 2017 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Wed May 24 08:42:24 2017 -0700

----------------------------------------------------------------------
 .../org/apache/geode/distributed/Locator.java   |  12 +-
 .../geode/distributed/LocatorLauncher.java      |   2 +-
 .../distributed/internal/InternalLocator.java   |  35 +--
 .../geode/internal/DistributionLocator.java     |   2 +-
 .../internal/cli/commands/ConfigCommands.java   | 245 +++++++++----------
 .../functions/AlterRuntimeConfigFunction.java   |  13 +-
 .../InternalLocatorIntegrationTest.java         |  79 ++++++
 .../cli/commands/ConfigCommandsDUnitTest.java   |  14 +-
 .../dunit/rules/LocatorServerStartupRule.java   |   5 +-
 .../apache/geode/test/dunit/rules/MemberVM.java |  11 +-
 10 files changed, 231 insertions(+), 187 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/main/java/org/apache/geode/distributed/Locator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/Locator.java b/geode-core/src/main/java/org/apache/geode/distributed/Locator.java
index 87cd243..645b261 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/Locator.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/Locator.java
@@ -14,6 +14,10 @@
  */
 package org.apache.geode.distributed;
 
+import org.apache.geode.distributed.internal.InternalLocator;
+import org.apache.geode.internal.i18n.LocalizedStrings;
+import org.apache.geode.internal.net.SocketCreator;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.InetAddress;
@@ -21,10 +25,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.geode.distributed.internal.InternalLocator;
-import org.apache.geode.internal.net.SocketCreator;
-import org.apache.geode.internal.i18n.LocalizedStrings;
-
 /**
  * Represents a distribution locator server that provides discovery information to members and
  * clients of a GemFire distributed system. In most GemFire distributed cache architectures,
@@ -250,8 +250,8 @@ public abstract class Locator {
   private static Locator startLocator(int port, File logFile, InetAddress bindAddress,
       java.util.Properties dsProperties, boolean peerLocator, boolean serverLocator,
       String hostnameForClients) throws IOException {
-    return InternalLocator.startLocator(port, logFile, null, null, null, bindAddress, dsProperties,
-        hostnameForClients);
+    return InternalLocator.startLocator(port, logFile, null, null, null, bindAddress, true,
+        dsProperties, hostnameForClients);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/main/java/org/apache/geode/distributed/LocatorLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/LocatorLauncher.java b/geode-core/src/main/java/org/apache/geode/distributed/LocatorLauncher.java
index 12c5c21..43ab546 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/LocatorLauncher.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/LocatorLauncher.java
@@ -649,7 +649,7 @@ public class LocatorLauncher extends AbstractLauncher<String> {
         // TODO : remove the extra param for loadFromSharedConfigDir
         try {
           this.locator = InternalLocator.startLocator(getPort(), getLogFile(), null, null, null,
-              getBindAddress(), getDistributedSystemProperties(), getHostnameForClients());
+              getBindAddress(), true, getDistributedSystemProperties(), getHostnameForClients());
         } finally {
           ProcessLauncherContext.remove();
         }

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
index 6500385..c299dd0 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/InternalLocator.java
@@ -228,11 +228,13 @@ public class InternalLocator extends Locator implements ConnectListener {
       return false;
     }
     synchronized (locatorLock) {
-      if (hasLocator()) {
-        if (locator.equals(InternalLocator.locator)) {
-          InternalLocator.locator = null;
-          return true;
-        }
+      LogWriterAppenders.stop(LogWriterAppenders.Identifier.MAIN);
+      LogWriterAppenders.stop(LogWriterAppenders.Identifier.SECURITY);
+      LogWriterAppenders.destroy(LogWriterAppenders.Identifier.MAIN);
+      LogWriterAppenders.destroy(LogWriterAppenders.Identifier.SECURITY);
+      if (locator != null && locator.equals(InternalLocator.locator)) {
+        InternalLocator.locator = null;
+        return true;
       }
       return false;
     }
@@ -284,26 +286,6 @@ public class InternalLocator extends Locator implements ConnectListener {
   }
 
   /**
-   * Creates a distribution locator that runs in this VM on the given port and bind address and
-   * creates a distributed system.
-   * 
-   * @param port the tcp/ip port to listen on
-   * @param logFile the file that log messages should be written to
-   * @param logger a log writer that should be used (logFile parameter is ignored)
-   * @param securityLogger the logger to be used for security related log messages
-   * @param dsProperties optional properties to configure the distributed system (e.g., mcast
-   *        addr/port, other locators)
-   * @param hostnameForClients the name to give to clients for connecting to this locator
-   * @since GemFire 7.0
-   */
-  public static InternalLocator startLocator(int port, File logFile, File stateFile,
-      InternalLogWriter logger, InternalLogWriter securityLogger, InetAddress bindAddress,
-      Properties dsProperties, String hostnameForClients) throws IOException {
-    return startLocator(port, logFile, stateFile, logger, securityLogger, bindAddress, true,
-        dsProperties, hostnameForClients);
-  }
-
-  /**
    * Creates a distribution locator that runs in this VM on the given port and bind address.
    * <p>
    * This is for internal use only as it does not create a distributed system unless told to do so.
@@ -615,7 +597,8 @@ public class InternalLocator extends Locator implements ConnectListener {
       InternalLogWriter logger, InternalLogWriter logger1, InetAddress addr,
       Properties dsProperties, boolean peerLocator, boolean serverLocator, String s, boolean b1)
       throws IOException {
-    return startLocator(locatorPort, logFile, stateFile, logger, logger1, addr, dsProperties, s);
+    return startLocator(locatorPort, logFile, stateFile, logger, logger1, addr, true, dsProperties,
+        s);
   }
 
   class SharedConfigurationRunnable implements Runnable {

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/main/java/org/apache/geode/internal/DistributionLocator.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/DistributionLocator.java b/geode-core/src/main/java/org/apache/geode/internal/DistributionLocator.java
index f861515..e190d0b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/DistributionLocator.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/DistributionLocator.java
@@ -167,7 +167,7 @@ public class DistributionLocator {
       try {
 
         InternalLocator locator = InternalLocator.startLocator(port, new File(DEFAULT_LOG_FILE),
-            null, null, null, address, (Properties) null, hostnameForClients);
+            null, null, null, address, true, (Properties) null, hostnameForClients);
 
         ManagerInfo.setLocatorStarted(directory, port, address);
         locator.waitToStop();

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
index ca2de76..6d3f50f 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
@@ -18,11 +18,11 @@ import static org.apache.geode.distributed.ConfigurationProperties.STATISTIC_SAM
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.geode.SystemFailure;
-import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.execute.FunctionInvocationTargetException;
 import org.apache.geode.cache.execute.ResultCollector;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.internal.cache.xmlcache.CacheXml;
+import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.internal.logging.log4j.LogLevel;
 import org.apache.geode.management.cli.CliMetaData;
 import org.apache.geode.management.cli.ConverterHint;
@@ -47,6 +47,7 @@ import org.apache.geode.management.internal.configuration.domain.XmlEntity;
 import org.apache.geode.management.internal.security.ResourceOperation;
 import org.apache.geode.security.ResourcePermission.Operation;
 import org.apache.geode.security.ResourcePermission.Resource;
+import org.apache.logging.log4j.Logger;
 import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
 import org.springframework.shell.core.annotation.CliCommand;
 import org.springframework.shell.core.annotation.CliOption;
@@ -72,6 +73,7 @@ public class ConfigCommands extends AbstractCommandsSupport {
       new GetMemberConfigInformationFunction();
   private final AlterRuntimeConfigFunction alterRunTimeConfigFunction =
       new AlterRuntimeConfigFunction();
+  private static Logger logger = LogService.getLogger();
 
   @CliCommand(value = {CliStrings.DESCRIBE_CONFIG}, help = CliStrings.DESCRIBE_CONFIG__HELP)
   @CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_CONFIG})
@@ -252,10 +254,10 @@ public class ConfigCommands extends AbstractCommandsSupport {
   public Result alterRuntimeConfig(
       @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__MEMBER},
           optionContext = ConverterHint.ALL_MEMBER_IDNAME,
-          help = CliStrings.ALTER_RUNTIME_CONFIG__MEMBER__HELP) String memberNameOrId,
+          help = CliStrings.ALTER_RUNTIME_CONFIG__MEMBER__HELP) String[] memberNameOrId,
       @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__GROUP},
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.ALTER_RUNTIME_CONFIG__MEMBER__HELP) String group,
+          help = CliStrings.ALTER_RUNTIME_CONFIG__MEMBER__HELP) String[] group,
       @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT},
           help = CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT__HELP) Integer archiveDiskSpaceLimit,
       @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT},
@@ -287,153 +289,138 @@ public class ConfigCommands extends AbstractCommandsSupport {
 
     Map<String, String> runTimeDistributionConfigAttributes = new HashMap<>();
     Map<String, String> rumTimeCacheAttributes = new HashMap<>();
-    Set<DistributedMember> targetMembers;
-
-    try {
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
 
-      targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+    }
 
-      if (archiveDiskSpaceLimit != null) {
-        runTimeDistributionConfigAttributes.put(
-            CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT,
-            archiveDiskSpaceLimit.toString());
-      }
+    if (archiveDiskSpaceLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT,
+          archiveDiskSpaceLimit.toString());
+    }
 
-      if (archiveFileSizeLimit != null) {
-        runTimeDistributionConfigAttributes.put(
-            CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT,
-            archiveFileSizeLimit.toString());
-      }
+    if (archiveFileSizeLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT,
+          archiveFileSizeLimit.toString());
+    }
 
-      if (logDiskSpaceLimit != null) {
-        runTimeDistributionConfigAttributes.put(
-            CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, logDiskSpaceLimit.toString());
-      }
+    if (logDiskSpaceLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, logDiskSpaceLimit.toString());
+    }
 
-      if (logFileSizeLimit != null) {
-        runTimeDistributionConfigAttributes.put(
-            CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT, logFileSizeLimit.toString());
-      }
+    if (logFileSizeLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT, logFileSizeLimit.toString());
+    }
 
-      if (logLevel != null && !logLevel.isEmpty()) {
-        runTimeDistributionConfigAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL,
-            logLevel);
-      }
+    if (logLevel != null && !logLevel.isEmpty()) {
+      runTimeDistributionConfigAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL,
+          logLevel);
+    }
 
-      if (statisticArchiveFile != null && !statisticArchiveFile.isEmpty()) {
-        runTimeDistributionConfigAttributes
-            .put(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE, statisticArchiveFile);
-      }
+    if (statisticArchiveFile != null && !statisticArchiveFile.isEmpty()) {
+      runTimeDistributionConfigAttributes
+          .put(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE, statisticArchiveFile);
+    }
 
-      if (statisticSampleRate != null) {
-        runTimeDistributionConfigAttributes.put(
-            CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE,
-            statisticSampleRate.toString());
-      }
+    if (statisticSampleRate != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE, statisticSampleRate.toString());
+    }
 
-      if (statisticSamplingEnabled != null) {
-        runTimeDistributionConfigAttributes.put(STATISTIC_SAMPLING_ENABLED,
-            statisticSamplingEnabled.toString());
-      }
+    if (statisticSamplingEnabled != null) {
+      runTimeDistributionConfigAttributes.put(STATISTIC_SAMPLING_ENABLED,
+          statisticSamplingEnabled.toString());
+    }
 
 
-      // Attributes that are set on the cache.
-      if (setCopyOnRead != null) {
-        rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__COPY__ON__READ,
-            setCopyOnRead.toString());
-      }
+    // Attributes that are set on the cache.
+    if (setCopyOnRead != null) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__COPY__ON__READ,
+          setCopyOnRead.toString());
+    }
 
-      if (lockLease != null && lockLease > 0 && lockLease < Integer.MAX_VALUE) {
-        rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__LEASE,
-            lockLease.toString());
-      }
+    if (lockLease != null && lockLease > 0 && lockLease < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__LEASE,
+          lockLease.toString());
+    }
 
-      if (lockTimeout != null && lockTimeout > 0 && lockTimeout < Integer.MAX_VALUE) {
-        rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__TIMEOUT,
-            lockTimeout.toString());
-      }
+    if (lockTimeout != null && lockTimeout > 0 && lockTimeout < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__TIMEOUT,
+          lockTimeout.toString());
+    }
 
-      if (messageSyncInterval != null && messageSyncInterval > 0
-          && messageSyncInterval < Integer.MAX_VALUE) {
-        rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__MESSAGE__SYNC__INTERVAL,
-            messageSyncInterval.toString());
-      }
+    if (messageSyncInterval != null && messageSyncInterval > 0
+        && messageSyncInterval < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__MESSAGE__SYNC__INTERVAL,
+          messageSyncInterval.toString());
+    }
 
-      if (searchTimeout != null && searchTimeout > 0 && searchTimeout < Integer.MAX_VALUE) {
-        rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__SEARCH__TIMEOUT,
-            searchTimeout.toString());
-      }
+    if (searchTimeout != null && searchTimeout > 0 && searchTimeout < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__SEARCH__TIMEOUT,
+          searchTimeout.toString());
+    }
 
-      if (!runTimeDistributionConfigAttributes.isEmpty() || !rumTimeCacheAttributes.isEmpty()) {
-        Map<String, String> allRunTimeAttributes = new HashMap<>();
-        allRunTimeAttributes.putAll(runTimeDistributionConfigAttributes);
-        allRunTimeAttributes.putAll(rumTimeCacheAttributes);
-
-        ResultCollector<?, ?> rc = CliUtil.executeFunction(alterRunTimeConfigFunction,
-            allRunTimeAttributes, targetMembers);
-        List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>) rc.getResult());
-        CompositeResultData crd = ResultBuilder.createCompositeResultData();
-        TabularResultData tabularData = crd.addSection().addTable();
-        Set<String> successfulMembers = new TreeSet<>();
-        Set<String> errorMessages = new TreeSet<>();
-
-
-        for (CliFunctionResult result : results) {
-          if (result.getThrowable() != null) {
-            errorMessages.add(result.getThrowable().getMessage());
-          } else {
-            successfulMembers.add(result.getMemberIdOrName());
-          }
-        }
-        final String lineSeparator = System.getProperty("line.separator");
-        if (!successfulMembers.isEmpty()) {
-          StringBuilder successMessageBuilder = new StringBuilder();
+    if (runTimeDistributionConfigAttributes.isEmpty() && rumTimeCacheAttributes.isEmpty()) {
+      return ResultBuilder
+          .createUserErrorResult(CliStrings.ALTER_RUNTIME_CONFIG__RELEVANT__OPTION__MESSAGE);
+    }
 
-          successMessageBuilder.append(CliStrings.ALTER_RUNTIME_CONFIG__SUCCESS__MESSAGE);
-          successMessageBuilder.append(lineSeparator);
+    Map<String, String> allRunTimeAttributes = new HashMap<>();
+    allRunTimeAttributes.putAll(runTimeDistributionConfigAttributes);
+    allRunTimeAttributes.putAll(rumTimeCacheAttributes);
 
-          for (String member : successfulMembers) {
-            successMessageBuilder.append(member);
-            successMessageBuilder.append(lineSeparator);
-          }
+    ResultCollector<?, ?> rc =
+        CliUtil.executeFunction(alterRunTimeConfigFunction, allRunTimeAttributes, targetMembers);
+    List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>) rc.getResult());
+    Set<String> successfulMembers = new TreeSet<>();
+    Set<String> errorMessages = new TreeSet<>();
 
-          Properties properties = new Properties();
-          properties.putAll(runTimeDistributionConfigAttributes);
-
-          Result result = ResultBuilder.createInfoResult(successMessageBuilder.toString());
-
-          // Set the Cache attributes to be modified
-          final XmlEntity xmlEntity = XmlEntity.builder().withType(CacheXml.CACHE)
-              .withAttributes(rumTimeCacheAttributes).build();
-          persistClusterConfiguration(result,
-              () -> getSharedConfiguration().modifyXmlAndProperties(properties, xmlEntity,
-                  group != null ? group.split(",") : null));
-          return result;
-        } else {
-          StringBuilder errorMessageBuilder = new StringBuilder();
-          errorMessageBuilder.append("Following errors occurred while altering runtime config");
-          errorMessageBuilder.append(lineSeparator);
-
-          for (String errorMessage : errorMessages) {
-            errorMessageBuilder.append(errorMessage);
-            errorMessageBuilder.append(lineSeparator);
-          }
-          return ResultBuilder.createUserErrorResult(errorMessageBuilder.toString());
-        }
+    for (CliFunctionResult result : results) {
+      if (result.getThrowable() != null) {
+        logger.info("Function failed: " + result.getThrowable());
+        errorMessages.add(result.getThrowable().getMessage());
       } else {
-        return ResultBuilder
-            .createUserErrorResult(CliStrings.ALTER_RUNTIME_CONFIG__RELEVANT__OPTION__MESSAGE);
+        successfulMembers.add(result.getMemberIdOrName());
       }
-    } catch (CommandResultException crex) {
-      return crex.getResult();
-    } catch (CacheClosedException e) {
-      return ResultBuilder.createGemFireErrorResult(e.getMessage());
-    } catch (FunctionInvocationTargetException e) {
-      return ResultBuilder.createGemFireErrorResult(CliStrings
-          .format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN, CliStrings.ALTER_RUNTIME_CONFIG));
-    } catch (Exception e) {
-      return ResultBuilder.createGemFireErrorResult(
-          CliStrings.format(CliStrings.EXCEPTION_CLASS_AND_MESSAGE, e.getClass(), e.getMessage()));
+    }
+    final String lineSeparator = System.getProperty("line.separator");
+    if (!successfulMembers.isEmpty()) {
+      StringBuilder successMessageBuilder = new StringBuilder();
+
+      successMessageBuilder.append(CliStrings.ALTER_RUNTIME_CONFIG__SUCCESS__MESSAGE);
+      successMessageBuilder.append(lineSeparator);
+
+      for (String member : successfulMembers) {
+        successMessageBuilder.append(member);
+        successMessageBuilder.append(lineSeparator);
+      }
+
+      Properties properties = new Properties();
+      properties.putAll(runTimeDistributionConfigAttributes);
+
+      Result result = ResultBuilder.createInfoResult(successMessageBuilder.toString());
+
+      // Set the Cache attributes to be modified
+      final XmlEntity xmlEntity = XmlEntity.builder().withType(CacheXml.CACHE)
+          .withAttributes(rumTimeCacheAttributes).build();
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().modifyXmlAndProperties(properties, xmlEntity, group));
+      return result;
+    } else {
+      StringBuilder errorMessageBuilder = new StringBuilder();
+      errorMessageBuilder.append("Following errors occurred while altering runtime config");
+      errorMessageBuilder.append(lineSeparator);
+
+      for (String errorMessage : errorMessages) {
+        errorMessageBuilder.append(errorMessage);
+        errorMessageBuilder.append(lineSeparator);
+      }
+      return ResultBuilder.createUserErrorResult(errorMessageBuilder.toString());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/AlterRuntimeConfigFunction.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/AlterRuntimeConfigFunction.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/AlterRuntimeConfigFunction.java
index 1d3e5f5..53d3ab7 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/AlterRuntimeConfigFunction.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/functions/AlterRuntimeConfigFunction.java
@@ -14,10 +14,6 @@
  */
 package org.apache.geode.management.internal.cli.functions;
 
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
 import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.CacheFactory;
 import org.apache.geode.cache.execute.FunctionAdapter;
@@ -26,13 +22,21 @@ import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.internal.ConfigSource;
 import org.apache.geode.internal.InternalEntity;
 import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.management.internal.cli.CliUtil;
 import org.apache.geode.management.internal.cli.i18n.CliStrings;
+import org.apache.logging.log4j.Logger;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
 
 public class AlterRuntimeConfigFunction extends FunctionAdapter implements InternalEntity {
 
   private static final long serialVersionUID = 1L;
 
+  private static Logger logger = LogService.getLogger();
+
   private InternalCache getCache() {
     return (InternalCache) CacheFactory.getAnyInstance();
   }
@@ -78,6 +82,7 @@ public class AlterRuntimeConfigFunction extends FunctionAdapter implements Inter
       context.getResultSender().lastResult(result);
 
     } catch (Exception e) {
+      logger.error("Exception happened on : " + memberId, e);
       CliFunctionResult cliFuncResult =
           new CliFunctionResult(memberId, e, CliUtil.stackTraceAsString(e));
       context.getResultSender().lastResult(cliFuncResult);

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/test/java/org/apache/geode/distributed/internal/InternalLocatorIntegrationTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/InternalLocatorIntegrationTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/InternalLocatorIntegrationTest.java
new file mode 100644
index 0000000..356c79f
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/InternalLocatorIntegrationTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.distributed.internal;
+
+import static org.apache.geode.distributed.ConfigurationProperties.LOG_FILE;
+import static org.apache.geode.distributed.ConfigurationProperties.NAME;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.geode.distributed.Locator;
+import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.internal.logging.log4j.LogWriterAppender;
+import org.apache.geode.internal.logging.log4j.LogWriterAppenders;
+import org.apache.geode.test.junit.categories.IntegrationTest;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.Properties;
+
+@Category(IntegrationTest.class)
+public class InternalLocatorIntegrationTest {
+
+  private Locator locator;
+  private LogWriterAppender appender;
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  @Test
+  public void testLogWriterAppenderShouldBeRemovedForALocatorWithNoDS() throws Exception {
+    Properties properties = new Properties();
+    properties.setProperty(NAME, "testVM");
+    properties.setProperty(LOG_FILE, temporaryFolder.newFile("testVM.log").getAbsolutePath());
+
+    int port = AvailablePortHelper.getRandomAvailableTCPPort();
+    locator =
+        InternalLocator.startLocator(port, null, null, null, null, null, false, properties, null);
+
+    appender = LogWriterAppenders.getAppender(LogWriterAppenders.Identifier.MAIN);
+    assertThat(appender).isNotNull();
+
+    locator.stop();
+
+    appender = LogWriterAppenders.getAppender(LogWriterAppenders.Identifier.MAIN);
+    assertThat(appender).isNull();
+  }
+
+  @Test
+  public void testLogWriterAppenderShouldBeRemovedForALocatorWithDS() throws Exception {
+    Properties properties = new Properties();
+    properties.setProperty(NAME, "testVM");
+    properties.setProperty(LOG_FILE, temporaryFolder.newFile("testVM.log").getAbsolutePath());
+
+    int port = AvailablePortHelper.getRandomAvailableTCPPort();
+    locator = InternalLocator.startLocatorAndDS(port, null, properties);
+
+    appender = LogWriterAppenders.getAppender(LogWriterAppenders.Identifier.MAIN);
+    assertThat(appender).isNotNull();
+
+    locator.stop();
+
+    appender = LogWriterAppenders.getAppender(LogWriterAppenders.Identifier.MAIN);
+    assertThat(appender).isNull();
+  }
+}

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ConfigCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ConfigCommandsDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ConfigCommandsDUnitTest.java
index edec00a..a110025 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ConfigCommandsDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ConfigCommandsDUnitTest.java
@@ -59,6 +59,7 @@ import org.apache.geode.management.internal.cli.remote.CommandProcessor;
 import org.apache.geode.management.internal.cli.result.CommandResult;
 import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
 import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.SerializableCallable;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
@@ -331,9 +332,10 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
         commandProcessor.createCommandStatement("alter runtime", Collections.EMPTY_MAP).process();
   }
 
-  @Category(FlakyTest.class) // GEODE-1313
   @Test
   public void testAlterRuntimeConfigRandom() throws Exception {
+    IgnoredException.addIgnoredException(
+        "java.lang.IllegalArgumentException: Could not set \"log-disk-space-limit\"");
     final String member1 = "VM1";
     final String controller = "controller";
 
@@ -352,7 +354,7 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
         Properties localProps = new Properties();
         localProps.setProperty(NAME, member1);
         getSystem(localProps);
-        Cache cache = getCache();
+        getCache();
       }
     });
 
@@ -360,9 +362,6 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     CommandResult cmdResult = executeCommand(csb.getCommandString());
     String resultAsString = commandResultToString(cmdResult);
 
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultAsString);
-
     assertEquals(true, cmdResult.getStatus().equals(Status.ERROR));
     assertTrue(resultAsString.contains(CliStrings.ALTER_RUNTIME_CONFIG__RELEVANT__OPTION__MESSAGE));
 
@@ -371,10 +370,9 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     cmdResult = executeCommand(csb.getCommandString());
     resultAsString = commandResultToString(cmdResult);
 
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultAsString);
-
     assertEquals(true, cmdResult.getStatus().equals(Status.ERROR));
+    assertTrue(
+        resultAsString.contains("Could not set \"log-disk-space-limit\" to \"2,000,000,000\""));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
index 055993c..dcdc5c4 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/LocatorServerStartupRule.java
@@ -75,8 +75,7 @@ public class LocatorServerStartupRule extends ExternalResource implements Serial
     DUnitLauncher.closeAndCheckForSuspects();
     restoreSystemProperties.after();
     temporaryFolder.delete();
-    Arrays.stream(members).filter(Objects::nonNull)
-        .forEach(MemberVM::stopMemberAndCleanupVMIfNecessary);
+    Arrays.stream(members).filter(Objects::nonNull).forEach(MemberVM::stopMember);
   }
 
   public MemberVM<Locator> startLocatorVM(int index) throws Exception {
@@ -139,7 +138,7 @@ public class LocatorServerStartupRule extends ExternalResource implements Serial
 
   public void stopMember(int index) {
     MemberVM member = members[index];
-    member.stopMemberAndCleanupVMIfNecessary();
+    member.stopMember();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/geode/blob/ca12f781/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java
index 6da824e..7e5ce1f 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/rules/MemberVM.java
@@ -76,12 +76,9 @@ public class MemberVM<T extends Member> implements Member {
     return member.getName();
   }
 
-  public void stopMemberAndCleanupVMIfNecessary() {
-    stopMember();
-    cleanupVMIfNecessary();
-  }
+  public void stopMember() {
 
-  private void cleanupVMIfNecessary() {
+    this.invoke(LocatorServerStartupRule::stopMemberInThisVM);
     /**
      * The LocatorServerStarterRule may dynamically change the "user.dir" system property to point
      * to a temporary folder. The Path API caches the first value of "user.dir" that it sees, and
@@ -93,8 +90,4 @@ public class MemberVM<T extends Member> implements Member {
       this.getVM().bounce();
     }
   }
-
-  public void stopMember() {
-    this.invoke(LocatorServerStartupRule::stopMemberInThisVM);
-  }
 }


[08/32] geode git commit: GEODE-2954 Old client gets null memberID in cache listener

Posted by kl...@apache.org.
GEODE-2954 Old client gets null memberID in cache listener

I've added a new test demonstrating that a new-version server sends an
EventID that an old client cannot completely deserialize. The client gets
an error when deserializing the event's member ID, so cache listeners
receive null when they request the ID of the member that effected the
change.

The fix is to reserialize the member ID in EventID.toData if the
destination stream is for an older version, such as a 1.1.0 client.
This ensures the proper on-wire format is used for that version of Geode.

I've also bumped up the version ordinal for 1.2 since version 59 is
marked as unusable in Version.java.

I've changed the Banner to show the version ordinal because the other
version information in the banner isn't completely trustworthy.  It
looks for a GemFireVersion.properties file on the classpath to get
this information and so it may not get it from the Geode jar file
as expected.
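
The core of the fix is the version check in EventID.toData. For readability, here is that new method body assembled from the EventID.java hunk below, with explanatory comments added; every type and call (InternalDataSerializer.getVersionForDataStream, Version.GFE_90, HeapDataOutputStream, writeEssentialData) comes from that hunk.

  public void toData(DataOutput dop) throws IOException {
    // Determine which product version is on the receiving end of this stream.
    Version version = InternalDataSerializer.getVersionForDataStream(dop);
    if (version.compareTo(Version.GFE_90) <= 0) {
      // Older destination: re-serialize the member ID in the on-wire format that
      // version understands, rather than sending the cached byte array it cannot
      // fully deserialize.
      InternalDistributedMember member = getDistributedMember();
      HeapDataOutputStream hdos = new HeapDataOutputStream(version);
      member.writeEssentialData(hdos);
      DataSerializer.writeByteArray(hdos.toByteArray(), dop);
    } else {
      // Newer destinations can consume the already-serialized member ID as-is.
      DataSerializer.writeByteArray(this.membershipID, dop);
    }
    DataSerializer.writeByteArray(getOptimizedByteArrayForEventID(this.threadID, this.sequenceID),
        dop);
    dop.writeInt(this.bucketID);
  }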


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/e79d27d7
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/e79d27d7
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/e79d27d7

Branch: refs/heads/feature/GEODE-1279
Commit: e79d27d7e258d2a5f0d8a3155cc1911825a90493
Parents: 096c22d
Author: Bruce Schuchardt <bs...@pivotal.io>
Authored: Wed May 24 15:13:52 2017 -0700
Committer: Bruce Schuchardt <bs...@pivotal.io>
Committed: Thu May 25 07:45:18 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/geode/internal/Banner.java  |   2 +
 .../java/org/apache/geode/internal/Version.java |   4 +-
 .../apache/geode/internal/cache/EventID.java    |  11 +-
 .../sockets/ClientServerMiscBCDUnitTest.java    |  44 +++
 .../tier/sockets/ClientServerMiscDUnitTest.java | 358 ++++++++-----------
 .../cli/commands/ShowDeadlockDUnitTest.java     |   0
 .../sanctionedDataSerializables.txt             |   8 +-
 7 files changed, 215 insertions(+), 212 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/main/java/org/apache/geode/internal/Banner.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/Banner.java b/geode-core/src/main/java/org/apache/geode/internal/Banner.java
index b6a89bf..a218a5b 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/Banner.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/Banner.java
@@ -104,6 +104,8 @@ public class Banner {
 
     GemFireVersion.print(out);
 
+    out.println("Communications version: " + Version.CURRENT_ORDINAL);
+
     out.println("Process ID: " + processId);
     out.println("User: " + sp.get("user.name"));
     sp.remove("user.name");

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/main/java/org/apache/geode/internal/Version.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/Version.java b/geode-core/src/main/java/org/apache/geode/internal/Version.java
index 1c131e8..5576971 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/Version.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/Version.java
@@ -59,7 +59,7 @@ public class Version implements Comparable<Version> {
   /** byte used as ordinal to represent this <code>Version</code> */
   private final short ordinal;
 
-  public static final int HIGHEST_VERSION = 60;
+  public static final int HIGHEST_VERSION = 65;
 
   private static final Version[] VALUES = new Version[HIGHEST_VERSION + 1];
 
@@ -190,7 +190,7 @@ public class Version implements Comparable<Version> {
   public static final Version GEODE_111 =
       new Version("GEODE", "1.1.1", (byte) 1, (byte) 1, (byte) 1, (byte) 0, GEODE_111_ORDINAL);
 
-  private static final byte GEODE_120_ORDINAL = 60;
+  private static final byte GEODE_120_ORDINAL = 65;
 
   public static final Version GEODE_120 =
       new Version("GEODE", "1.2.0", (byte) 1, (byte) 2, (byte) 0, (byte) 0, GEODE_120_ORDINAL);

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
index 87835ff..71acdc9 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.geode.internal.InternalDataSerializer;
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.DataSerializer;
@@ -322,7 +323,15 @@ public class EventID implements DataSerializableFixedID, Serializable, Externali
   }
 
   public void toData(DataOutput dop) throws IOException {
-    DataSerializer.writeByteArray(this.membershipID, dop);
+    Version version = InternalDataSerializer.getVersionForDataStream(dop);
+    if (version.compareTo(Version.GFE_90) <= 0) {
+      InternalDistributedMember member = getDistributedMember();
+      HeapDataOutputStream hdos = new HeapDataOutputStream(version);
+      member.writeEssentialData(hdos);
+      DataSerializer.writeByteArray(hdos.toByteArray(), dop);
+    } else {
+      DataSerializer.writeByteArray(this.membershipID, dop);
+    }
     DataSerializer.writeByteArray(getOptimizedByteArrayForEventID(this.threadID, this.sequenceID),
         dop);
     dop.writeInt(this.bucketID);

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
index be0ac6b..d51c196 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscBCDUnitTest.java
@@ -14,17 +14,28 @@
  */
 package org.apache.geode.internal.cache.tier.sockets;
 
+import static org.junit.Assert.assertFalse;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.client.Pool;
+import org.apache.geode.internal.cache.LocalRegion;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.NetworkUtils;
+import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.standalone.VersionManager;
 import org.apache.geode.test.junit.categories.BackwardCompatibilityTest;
 import org.apache.geode.test.junit.categories.ClientServerTest;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
+import org.awaitility.Awaitility;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 @Category({DistributedTest.class, ClientServerTest.class, BackwardCompatibilityTest.class})
 @RunWith(Parameterized.class)
@@ -46,4 +57,37 @@ public class ClientServerMiscBCDUnitTest extends ClientServerMiscDUnitTest {
     testVersion = version;
   }
 
+  @Test
+  public void testSubscriptionWithCurrentServerAndOldClients() throws Exception {
+    // start server first
+    int serverPort = initServerCache(true);
+    VM client1 = Host.getHost(0).getVM(testVersion, 1);
+    VM client2 = Host.getHost(0).getVM(testVersion, 3);
+    String hostname = NetworkUtils.getServerHostName(Host.getHost(0));
+    client1.invoke("create client1 cache", () -> {
+      createClientCache(hostname, serverPort);
+      populateCache();
+      registerInterest();
+    });
+    client2.invoke("create client2 cache", () -> {
+      Pool ignore = createClientCache(hostname, serverPort);
+    });
+
+    client2.invoke("putting data in client2", () -> putForClient());
+
+    // client1 will receive client2's updates asynchronously
+    client1.invoke(() -> {
+      Region r2 = getCache().getRegion(REGION_NAME2);
+      MemberIDVerifier verifier = (MemberIDVerifier) ((LocalRegion) r2).getCacheListener();
+      Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> verifier.eventReceived);
+    });
+
+    // client2's update should have included a memberID - GEODE-2954
+    client1.invoke(() -> {
+      Region r2 = getCache().getRegion(REGION_NAME2);
+      MemberIDVerifier verifier = (MemberIDVerifier) ((LocalRegion) r2).getCacheListener();
+      assertFalse(verifier.memberIDNotReceived);
+    });
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
index b4f3185..9ca5ab9 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
@@ -30,6 +30,7 @@ import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheException;
 import org.apache.geode.cache.CacheWriterException;
 import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EntryEvent;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.cache.Scope;
@@ -42,7 +43,9 @@ import org.apache.geode.cache.client.internal.Op;
 import org.apache.geode.cache.client.internal.PoolImpl;
 import org.apache.geode.cache.client.internal.RegisterInterestTracker;
 import org.apache.geode.cache.server.CacheServer;
+import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.cache30.CacheSerializableRunnable;
+import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.DistributedSystemDisconnectedException;
 import org.apache.geode.internal.AvailablePort;
@@ -63,6 +66,7 @@ import org.apache.geode.test.dunit.standalone.VersionManager;
 import org.apache.geode.test.junit.categories.ClientServerTest;
 import org.apache.geode.test.junit.categories.DistributedTest;
 import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
+import org.awaitility.Awaitility;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -72,6 +76,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Tests client server corner cases between Region and Pool
@@ -95,9 +100,9 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
 
   private static final String server_k2 = "server-k2";
 
-  private static final String REGION_NAME1 = "ClientServerMiscDUnitTest_region1";
+  static final String REGION_NAME1 = "ClientServerMiscDUnitTest_region1";
 
-  private static final String REGION_NAME2 = "ClientServerMiscDUnitTest_region2";
+  static final String REGION_NAME2 = "ClientServerMiscDUnitTest_region2";
 
   private static final String PR_REGION_NAME = "ClientServerMiscDUnitTest_PRregion";
 
@@ -138,13 +143,13 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
     server2 = host.getVM(3);
   }
 
-  private int initServerCache(boolean notifyBySub) {
+  int initServerCache(boolean notifyBySub) {
     Object[] args = new Object[] {notifyBySub, getMaxThreads()};
     return ((Integer) server1.invoke(ClientServerMiscDUnitTest.class, "createServerCache", args))
         .intValue();
   }
 
-  private int initServerCache2(boolean notifyBySub) {
+  int initServerCache2(boolean notifyBySub) {
     Object[] args = new Object[] {notifyBySub, getMaxThreads()};
     return ((Integer) server2.invoke(ClientServerMiscDUnitTest.class, "createServerCache", args))
         .intValue();
@@ -373,19 +378,18 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
   public void testForTwoRegionHavingDifferentInterestList() throws Exception {
     // start server first
     PORT1 = initServerCache(true);
-    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1);
-    populateCache();
-    registerInterest();
-    server1.invoke(() -> ClientServerMiscDUnitTest.put());
+    int serverPort = PORT1;
+    VM client1 = Host.getHost(0).getVM(testVersion, 1);
+    String hostname = NetworkUtils.getServerHostName(Host.getHost(0));
+    client1.invoke("create client1 cache", () -> {
+      createClientCache(hostname, serverPort);
+      populateCache();
+      registerInterest();
+    });
 
-    // pause(5000 + 5000 + 10000);
-    /*
-     * final int maxWaitTime = Integer.getInteger(WAIT_PROPERTY, WAIT_DEFAULT).intValue(); try {
-     * Thread.yield(); Thread.sleep(maxWaitTime); } catch (InterruptedException e) {
-     * fail("interrupted"); }
-     */
-    verifyUpdates();
+    server1.invoke("putting entries in server1", () -> put());
 
+    client1.invoke(() -> verifyUpdates());
   }
 
   /**
@@ -590,65 +594,27 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
 
     populateCache();
     server1.invoke(() -> ClientServerMiscDUnitTest.put());
-    // pause(5000);
-    WaitCriterion wc = new WaitCriterion() {
-      String excuse;
-
-      public boolean done() {
-        Object val = region1.getEntry(k1).getValue();
-        return k1.equals(val);
-      }
 
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
-
-    // assertIndexDetailsEquals(region1.getEntry(k1).getValue(), k1);
-    wc = new WaitCriterion() {
-      String excuse;
-
-      public boolean done() {
-        Object val = region1.getEntry(k2).getValue();
-        return k2.equals(val);
-      }
-
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
-
-    wc = new WaitCriterion() {
-      String excuse;
-
-      public boolean done() {
-        Object val = region2.getEntry(k1).getValue();
-        return k1.equals(val);
-      }
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region1.getEntry(k1).getValue();
+      return k1.equals(val);
+    });
 
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region1.getEntry(k2).getValue();
+      return k2.equals(val);
+    });
 
-    // assertIndexDetailsEquals(region1.getEntry(k2).getValue(), k2);
-    // assertIndexDetailsEquals(region2.getEntry(k1).getValue(), k1);
-    wc = new WaitCriterion() {
-      String excuse;
 
-      public boolean done() {
-        Object val = region2.getEntry(k2).getValue();
-        return k2.equals(val);
-      }
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region2.getEntry(k1).getValue();
+      return k1.equals(val);
+    });
 
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = region2.getEntry(k2).getValue();
+      return k2.equals(val);
+    });
 
     // assertIndexDetailsEquals(region2.getEntry(k2).getValue(), k2);
   }
@@ -857,6 +823,32 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
     return p;
   }
 
+  static class MemberIDVerifier extends CacheListenerAdapter {
+    boolean memberIDNotReceived = true;
+    boolean eventReceived = false;
+
+    @Override
+    public void afterCreate(EntryEvent event) {
+      eventReceived(event);
+    }
+
+    @Override
+    public void afterUpdate(EntryEvent event) {
+      eventReceived(event);
+    }
+
+    private void eventReceived(EntryEvent event) {
+      eventReceived = true;
+      DistributedMember memberID = event.getDistributedMember();
+      memberIDNotReceived = (memberID == null);
+    }
+
+    public void reset() {
+      memberIDNotReceived = true;
+      eventReceived = false;
+    }
+  }
+
   public static Integer createServerCache(Boolean notifyBySubscription, Integer maxThreads)
       throws Exception {
     Cache cache = new ClientServerMiscDUnitTest().createCacheV(new Properties());
@@ -893,17 +885,12 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
     return 0;
   }
 
-  public static void registerInterest() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r);
-      // r.registerInterestRegex(CacheClientProxy.ALL_KEYS);
-      r.registerInterest("ALL_KEYS");
-    } catch (CacheWriterException e) {
-      e.printStackTrace();
-      Assert.fail("Test failed due to CacheWriterException during registerInterest", e);
-    }
+  public static void registerInterest() throws Exception {
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r);
+    r.registerInterest("ALL_KEYS");
+    r.getAttributesMutator().addCacheListener(new MemberIDVerifier());
   }
 
   public static void registerInterestForInvalidatesInBothTheRegions() {
@@ -1070,153 +1057,114 @@ public class ClientServerMiscDUnitTest extends JUnit4CacheTestCase {
   }
 
   public static void verifyCacheClientProxyOnServer() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      assertEquals("More than one BridgeServer", 1, cache.getCacheServers().size());
-      CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
-      assertNotNull(bs);
-      assertNotNull(bs.getAcceptor());
-      final CacheClientNotifier ccn = bs.getAcceptor().getCacheClientNotifier();
-
-      assertNotNull(ccn);
-      WaitCriterion wc = new WaitCriterion() {
-        String excuse;
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    assertEquals("More than one BridgeServer", 1, cache.getCacheServers().size());
+    CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
+    assertNotNull(bs);
+    assertNotNull(bs.getAcceptor());
+    final CacheClientNotifier ccn = bs.getAcceptor().getCacheClientNotifier();
+
+    assertNotNull(ccn);
+    WaitCriterion wc = new WaitCriterion() {
+      String excuse;
 
-        public boolean done() {
-          return ccn.getClientProxies().size() == 1;
-        }
+      public boolean done() {
+        return ccn.getClientProxies().size() == 1;
+      }
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 40 * 1000, 1000, true);
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      fail("while setting verifyNoCacheClientProxyOnServer  " + ex);
-    }
+      public String description() {
+        return excuse;
+      }
+    };
+    Wait.waitForCriterion(wc, 40 * 1000, 1000, true);
   }
 
   public static void populateCache() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
-      Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r1);
-      assertNotNull(r2);
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
+    Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r1);
+    assertNotNull(r2);
 
-      if (!r1.containsKey(k1))
-        r1.create(k1, k1);
-      if (!r1.containsKey(k2))
-        r1.create(k2, k2);
-      if (!r2.containsKey(k1))
-        r2.create(k1, k1);
-      if (!r2.containsKey(k2))
-        r2.create(k2, k2);
-
-      assertEquals(r1.getEntry(k1).getValue(), k1);
-      assertEquals(r1.getEntry(k2).getValue(), k2);
-      assertEquals(r2.getEntry(k1).getValue(), k1);
-      assertEquals(r2.getEntry(k2).getValue(), k2);
-    } catch (Exception ex) {
-      Assert.fail("failed while createEntries()", ex);
-    }
+    if (!r1.containsKey(k1))
+      r1.create(k1, k1);
+    if (!r1.containsKey(k2))
+      r1.create(k2, k2);
+    if (!r2.containsKey(k1))
+      r2.create(k1, k1);
+    if (!r2.containsKey(k2))
+      r2.create(k2, k2);
+
+    assertEquals(r1.getEntry(k1).getValue(), k1);
+    assertEquals(r1.getEntry(k2).getValue(), k2);
+    assertEquals(r2.getEntry(k1).getValue(), k1);
+    assertEquals(r2.getEntry(k2).getValue(), k2);
   }
 
   public static void put() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
-      Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r1);
-      assertNotNull(r2);
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
+    Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r1);
+    assertNotNull(r2);
 
-      r1.put(k1, server_k1);
-      r1.put(k2, server_k2);
+    r1.put(k1, server_k1);
+    r1.put(k2, server_k2);
 
-      r2.put(k1, server_k1);
-      r2.put(k2, server_k2);
+    r2.put(k1, server_k1);
+    r2.put(k2, server_k2);
 
-      assertEquals(r1.getEntry(k1).getValue(), server_k1);
-      assertEquals(r1.getEntry(k2).getValue(), server_k2);
-      assertEquals(r2.getEntry(k1).getValue(), server_k1);
-      assertEquals(r2.getEntry(k2).getValue(), server_k2);
-    } catch (Exception ex) {
-      Assert.fail("failed while put()", ex);
-    }
+    assertEquals(r1.getEntry(k1).getValue(), server_k1);
+    assertEquals(r1.getEntry(k2).getValue(), server_k2);
+    assertEquals(r2.getEntry(k1).getValue(), server_k1);
+    assertEquals(r2.getEntry(k2).getValue(), server_k2);
   }
 
-  public static void verifyUpdates() {
-    try {
-      Cache cache = new ClientServerMiscDUnitTest().getCache();
-      final Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
-      final Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
-      assertNotNull(r1);
-      assertNotNull(r2);
-      // verify updates
-      WaitCriterion wc = new WaitCriterion() {
-        String excuse;
-
-        public boolean done() {
-          Object val = r1.getEntry(k1).getValue();
-          return k1.equals(val);
-        }
+  public static void putForClient() {
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
-
-      // assertIndexDetailsEquals(k1, r1.getEntry(k1).getValue());
-      wc = new WaitCriterion() {
-        String excuse;
-
-        public boolean done() {
-          Object val = r1.getEntry(k2).getValue();
-          return k2.equals(val);
-        }
-
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    r2.put(k1, "client2_k1");
+    r2.put(k2, "client2_k2");
+  }
 
-      // assertIndexDetailsEquals(k2, r1.getEntry(k2).getValue());
-      wc = new WaitCriterion() {
-        String excuse;
+  public static void verifyUpdates() {
+    Cache cache = new ClientServerMiscDUnitTest().getCache();
+    final Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME1);
+    final Region r2 = cache.getRegion(Region.SEPARATOR + REGION_NAME2);
+    assertNotNull(r1);
+    assertNotNull(r2);
 
-        public boolean done() {
-          Object val = r2.getEntry(k1).getValue();
-          return server_k1.equals(val);
-        }
+    // no interest registered in region1 - it should hold client values, which are
+    // the same as the keys
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r1.getEntry(k1).getValue();
+      return k1.equals(val);
+    });
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r1.getEntry(k2).getValue();
+      return k2.equals(val);
+    });
 
-      // assertIndexDetailsEquals(server_k1, r2.getEntry(k1).getValue());
-      wc = new WaitCriterion() {
-        String excuse;
+    // interest was registered in region2 - it should contain server values
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r2.getEntry(k1).getValue();
+      return server_k1.equals(val);
+    });
 
-        public boolean done() {
-          Object val = r2.getEntry(k2).getValue();
-          return server_k2.equals(val);
-        }
+    Awaitility.await().atMost(60, TimeUnit.SECONDS).until(() -> {
+      Object val = r2.getEntry(k2).getValue();
+      return server_k2.equals(val);
+    });
 
-        public String description() {
-          return excuse;
-        }
-      };
-      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
+    // events should have contained a memberID
+    MemberIDVerifier verifier = (MemberIDVerifier) ((LocalRegion) r2).getCacheListener();
+    assertTrue("client should have received a listener event", verifier.eventReceived);
+    assertFalse("client received an update but the event had no member id",
+        verifier.memberIDNotReceived);
+    verifier.reset();
 
-      // assertIndexDetailsEquals(server_k2, r2.getEntry(k2).getValue());
-    } catch (Exception ex) {
-      Assert.fail("failed while verifyUpdates()", ex);
-    }
   }
 
   public static void verifyInvalidatesOnBothRegions() {
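The bulk of the remaining changes in this file swap anonymous `WaitCriterion` blocks driven by `Wait.waitForCriterion(wc, 60 * 1000, 1000, true)` for Awaitility polling. The replacement pattern, reduced to a sketch (the region, key, and expected value are placeholders):

```
import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.Region;
import org.awaitility.Awaitility;

class AwaitEntrySketch {
  // Polls until the entry for 'key' equals 'expected'. On timeout Awaitility throws a
  // ConditionTimeoutException after 60 seconds, so no hand-written description() is needed.
  static void awaitEntryValue(Region region, Object key, Object expected) {
    Awaitility.await().atMost(60, TimeUnit.SECONDS)
        .until(() -> expected.equals(region.getEntry(key).getValue()));
  }
}
```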

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/commands/ShowDeadlockDUnitTest.java
old mode 100644
new mode 100755

http://git-wip-us.apache.org/repos/asf/geode/blob/e79d27d7/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
----------------------------------------------------------------------
diff --git a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
index f2baa50..88df942 100644
--- a/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
+++ b/geode-core/src/test/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt
@@ -997,10 +997,10 @@ fromData,50,2a03b500052bb9004201003d1c9900112abb000759b70043b50004a7000e2abb0003
 toData,22,2b2ab40004c10007b9004002002ab400042bb60041b1
 
 org/apache/geode/internal/cache/EventID,4
-fromData,53,2a2bb80038b500042bb80038b800394d2a2cb8003ab500092a2cb8003ab5000b2a2bb9003b0100b5000c2a2bb9003c0100b50001b1
-fromDataPre_GFE_8_0_0_0,33,2a2bb80038b500042bb80038b800394d2a2cb8003ab500092a2cb8003ab5000bb1
-toData,44,2ab400042bb800352ab400092ab4000bb800332bb800352b2ab4000cb9003602002b2ab40001b900370200b1
-toDataPre_GFE_8_0_0_0,24,2ab400042bb800352ab400092ab4000bb800332bb80035b1
+fromData,53,2a2bb8003db500042bb8003db8003e4d2a2cb8003fb500092a2cb8003fb5000b2a2bb900400100b5000c2a2bb900410100b50001b1
+fromDataPre_GFE_8_0_0_0,33,2a2bb8003db500042bb8003db8003e4d2a2cb8003fb500092a2cb8003fb5000bb1
+toData,92,2bb800354d2cb20036b600379d00242ab600384ebb0010592cb700393a042d1904b600151904b600162bb8003aa7000b2ab400042bb8003a2ab400092ab4000bb800332bb8003a2b2ab4000cb9003b02002b2ab40001b9003c0200b1
+toDataPre_GFE_8_0_0_0,24,2ab400042bb8003a2ab400092ab4000bb800332bb8003ab1
 
 org/apache/geode/internal/cache/EventTracker$EventSeqnoHolder,2
 fromData,22,2a2bb9000e0100b500042a2bb8000fc00010b50005b1


[13/32] geode git commit: GEODE-2941 Update Pulse documentation

Posted by kl...@apache.org.
GEODE-2941 Update Pulse documentation


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/7b34cfd9
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/7b34cfd9
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/7b34cfd9

Branch: refs/heads/feature/GEODE-1279
Commit: 7b34cfd9fd7865ee30f2d1518977b3c7bce294a4
Parents: b7faa08
Author: Dave Barnes <db...@pivotal.io>
Authored: Wed May 24 17:18:58 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Thu May 25 13:29:42 2017 -0700

----------------------------------------------------------------------
 .../source/subnavs/geode-subnav.erb             |  13 +-
 .../cluster_config/gfsh_persist.html.md.erb     |   2 +-
 .../15_minute_quickstart_gfsh.html.md.erb       |   2 +-
 .../management/jmx_manager_node.html.md.erb     |   2 +-
 .../management_system_overview.html.md.erb      |   4 +-
 .../managing/management/mm_overview.html.md.erb |   2 +-
 geode-docs/tools_modules/book_intro.html.md.erb |   2 +-
 .../gfsh/command-pages/start.html.md.erb        |   4 +-
 .../lucene_integration.html.md.erb              |   2 -
 .../pulse/chapter_overview.html.md.erb          |  49 --
 .../tools_modules/pulse/pulse-auth.html.md.erb  |  63 ++
 .../pulse/pulse-embedded.html.md.erb            |  76 ++
 .../pulse/pulse-hosted.html.md.erb              |  89 ++
 .../pulse/pulse-overview.html.md.erb            |  49 ++
 .../pulse/pulse-requirements.html.md.erb        |  34 +
 .../tools_modules/pulse/pulse-views.html.md.erb | 453 ++++++++++
 .../tools_modules/pulse/quickstart.html.md.erb  | 827 -------------------
 .../pulse/system_requirements.html.md.erb       |  35 -
 .../tools_modules/redis_adapter.html.md.erb     |   2 -
 19 files changed, 781 insertions(+), 929 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index 12b2151..aa0faf4 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -2271,19 +2271,22 @@ gfsh</a>
                         </ul>
                     </li>
                     <li class="has_submenu">
-                        <a href="/docs/guide/12/tools_modules/pulse/chapter_overview.html">Geode Pulse</a>
+                        <a href="/docs/guide/12/tools_modules/pulse/pulse-overview.html">Geode Pulse</a>
                         <ul>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_523F6DE33FE54307BBE8F83BB7D9355D">Pulse Quick Start (Embedded Mode)</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-requirements.html">Pulse System Requirements</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_795C97B46B9843528961A094EE520782">Hosting Pulse on a Web Application Server</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-embedded.html">Running Pulse in Embedded Mode (Quick Start)</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC">Configuring Pulse Authentication</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-hosted.html">Hosting Pulse on a Web Application Server</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/pulse/quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404">Using Pulse Views</a>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-auth.html">Configuring Pulse Authentication</a>
+                            </li>
+                            <li>
+                                <a href="/docs/guide/12/tools_modules/pulse/pulse-views.html">Using Pulse Views</a>
                             </li>
                         </ul>
                     </li>

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb b/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
index 45f73f9..4e21735 100644
--- a/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
+++ b/geode-docs/configuring/cluster_config/gfsh_persist.html.md.erb
@@ -91,7 +91,7 @@ There are some configurations that you cannot create using `gfsh`, and that you
     -   `cache-writer`
     -   `compressor`
     -   `serializer`
-    -   `instantiantor`
+    -   `instantiator`
     -   `pdx-serializer`
     
         **Note:**

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb b/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
index 894b998..954dcf8 100644
--- a/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
+++ b/geode-docs/getting_started/15_minute_quickstart_gfsh.html.md.erb
@@ -73,7 +73,7 @@ If you run `start locator` from gfsh without specifying the member name, gfsh wi
 
 ## <a id="topic_FE3F28ED18E145F787431EC87B676A76__section_02C79BFFB5334E78A5856AE1EB1F1F84" class="no-quick-link"></a>Step 3: Start Pulse
 
-Start up the browser-based Pulse monitoring tool. Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions. See [Geode Pulse](../tools_modules/pulse/chapter_overview.html).
+Start up the browser-based Pulse monitoring tool. Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions. See [Geode Pulse](../tools_modules/pulse/pulse-overview.html).
 
 ``` pre
 gfsh>start pulse

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/managing/management/jmx_manager_node.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/jmx_manager_node.html.md.erb b/geode-docs/managing/management/jmx_manager_node.html.md.erb
index 97ca066..d002734 100644
--- a/geode-docs/managing/management/jmx_manager_node.html.md.erb
+++ b/geode-docs/managing/management/jmx_manager_node.html.md.erb
@@ -23,7 +23,7 @@ limitations under the License.
 
 Any member can host an embedded JMX Manager, which provides a federated view of all MBeans for the distributed system. The member can be configured to be a manager at startup or anytime during its life by invoking the appropriate API calls on the ManagementService.
 
-You need to have a JMX Manager started in your distributed system in order to use Geode management and monitoring tools such as [gfsh](../../tools_modules/gfsh/chapter_overview.html) and [Geode Pulse](../../tools_modules/pulse/chapter_overview.html).
+You need to have a JMX Manager started in your distributed system in order to use Geode management and monitoring tools such as [gfsh](../../tools_modules/gfsh/chapter_overview.html) and [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
 
 **Note:**
 Each node that acts as the JMX Manager has additional memory requirements depending on the number of resources that it is managing and monitoring. Being a JMX Manager can increase the memory footprint of any process, including locator processes. See [Memory Requirements for Cached Data](../../reference/topics/memory_requirements_for_cache_data.html#calculating_memory_requirements) for more information on calculating memory overhead on your Geode processes.

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/managing/management/management_system_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/management_system_overview.html.md.erb b/geode-docs/managing/management/management_system_overview.html.md.erb
index ecf69f5..13a3de0 100644
--- a/geode-docs/managing/management/management_system_overview.html.md.erb
+++ b/geode-docs/managing/management/management_system_overview.html.md.erb
@@ -105,8 +105,8 @@ You can also execute gfsh commands using the ManagementService API. See [Executi
 This section lists the currently available tools for managing and monitoring Geode:
 
 -   **gfsh**. Apache Geode command-line interface that provides a simple & powerful command shell that supports the administration, debugging and deployment of Geode applications. It features context sensitive help, scripting and the ability to invoke any commands from within the application using a simple API. See [gfsh](../../tools_modules/gfsh/chapter_overview.html).
--   **Geode Pulse**. Easy-to-use, browser-based dashboard for monitoring Geode deployments. Geode Pulse provides an integrated view of all Geode members within a distributed system. See [Geode Pulse](../../tools_modules/pulse/chapter_overview.html).
--   **Pulse Data Browser**. This Geode Pulse utility provides a graphical interface for performing OQL ad-hoc queries in a Geode distributed system. See [Data Browser](../../tools_modules/pulse/quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
+-   **Geode Pulse**. Easy-to-use, browser-based dashboard for monitoring Geode deployments. Geode Pulse provides an integrated view of all Geode members within a distributed system. See [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
+-   **Pulse Data Browser**. This Geode Pulse utility provides a graphical interface for performing OQL ad-hoc queries in a Geode distributed system. See [Data Browser](../../tools_modules/pulse/pulse-views.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
 -   **Other Java Monitoring Tools such as JConsole and jvisualvm.** JConsole is a JMX-based management and monitoring tool provided in the Java 2 Platform that provides information on the performance and consumption of resources by Java applications. See [http://docs.oracle.com/javase/6/docs/technotes/guides/management/jconsole.html](http://docs.oracle.com/javase/6/docs/technotes/guides/management/jconsole.html). **Java VisualVM (jvisualvm)** is a profiling tool for analyzing your Java Virtual Machine. Java VisualVM is useful to Java application developers to troubleshoot applications and to monitor and improve the applications' performance. Java VisualVM can allow developers to generate and analyse heap dumps, track down memory leaks, perform and monitor garbage collection, and perform lightweight memory and CPU profiling. For more details on using jvisualvm, see [http://docs.oracle.com/javase/6/docs/technotes/tools/share/jvisualvm.html](http://docs.oracle.com/javase/6/docs/technotes/tools/share/jvisualvm.html).
 
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/managing/management/mm_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/management/mm_overview.html.md.erb b/geode-docs/managing/management/mm_overview.html.md.erb
index 89a8df5..21967cb 100644
--- a/geode-docs/managing/management/mm_overview.html.md.erb
+++ b/geode-docs/managing/management/mm_overview.html.md.erb
@@ -85,7 +85,7 @@ Geode Pulse is a Web Application that provides a graphical dashboard for monitor
 
 Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, and critical notifications. Pulse communicates with a Geode JMX manager to provide a complete view of your Geode deployment.
 
-See [Geode Pulse](../../tools_modules/pulse/chapter_overview.html).
+See [Geode Pulse](../../tools_modules/pulse/pulse-overview.html).
 
 ## JConsole
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/book_intro.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/book_intro.html.md.erb b/geode-docs/tools_modules/book_intro.html.md.erb
index e7390c5..2bf0930 100644
--- a/geode-docs/tools_modules/book_intro.html.md.erb
+++ b/geode-docs/tools_modules/book_intro.html.md.erb
@@ -35,7 +35,7 @@ limitations under the License.
 
     The Apache Geode HTTP Session Management modules provide fast, scalable, and reliable session replication for HTTP servers without requiring application changes.
 
--   **[Geode Pulse](pulse/chapter_overview.html)**
+-   **[Geode Pulse](pulse/pulse-overview.html)**
 
     Geode Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions.
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
index e2a4edc..0bec322 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/start.html.md.erb
@@ -458,7 +458,7 @@ Cluster configuration service is up and running.
 
 Launches the Geode Pulse monitoring dashboard tool in the user's default system browser and navigates the user to the landing page (login page).
 
-For more information on Geode Pulse, see [Geode Pulse](../../pulse/chapter_overview.html).
+For more information on Geode Pulse, see [Geode Pulse](../../pulse/pulse-overview.html).
 
 **Availability:** Online or offline.
 
@@ -483,7 +483,7 @@ start pulse
 start pulse --url=http://gemfire.example.com:7070/pulse
 ```
 
-**Sample Output:** See [Geode Pulse](../../pulse/chapter_overview.html) for examples of Pulse.
+**Sample Output:** See [Geode Pulse](../../pulse/pulse-overview.html) for examples of Pulse.
 
 ## <a id="topic_3764EE2DB18B4AE4A625E0354471738A" class="no-quick-link"></a>start server
 

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/lucene_integration.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/lucene_integration.html.md.erb b/geode-docs/tools_modules/lucene_integration.html.md.erb
index b83705b..7f5afdc 100644
--- a/geode-docs/tools_modules/lucene_integration.html.md.erb
+++ b/geode-docs/tools_modules/lucene_integration.html.md.erb
@@ -1,8 +1,6 @@
 ---
 title: Apache Lucene&reg; Integration
 ---
-<a id="topic_523F6DE33FE54307BBE8F83BB7D9355D"></a>
-
 <!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements.  See the NOTICE file distributed with

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb b/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb
deleted file mode 100644
index 19de8f1..0000000
--- a/geode-docs/tools_modules/pulse/chapter_overview.html.md.erb
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title:  Geode Pulse
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Geode Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions.
-
-Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, WAN connections, and critical notifications. Pulse communicates with a Geode JMX manager to provide a complete view of your Geode deployment. You can drill down from a high-level cluster view to examine individual members and even regions within a member, to filter the type of information and level of detail.
-
-By default, Geode Pulse runs in an embedded container within a Geode JMX manager node. You can optionally deploy Pulse to a Web application server of your choice, so that the tool runs independently of your Geode clusters. Hosting Pulse on an application server also enables you to use SSL for accessing the application.
-
--   **[Pulse System Requirements](system_requirements.html)**
-
-    Verify that your system meets the installation and runtime requirements for GemFire Pulse.
-
-
--   **[Pulse Quick Start (Embedded Mode)](quickstart.html#topic_523F6DE33FE54307BBE8F83BB7D9355D)**
-
-    Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse application. Optionally, configure Pulse to connect to a Geode system of your choice.
-
--   **[Hosting Pulse on a Web Application Server](quickstart.html#topic_795C97B46B9843528961A094EE520782)**
-
-    Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
-
--   **[Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC)**
-
-    Pulse requires all users to authenticate themselves before they can use the Pulse Web application. If you have configured JMX authentication on the Geode JMX Manager node, the Pulse Web application itself may also need to authenticate itself to the Geode JMX Manager node on startup.
-
--   **[Using Pulse Views](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404)**
-
-    Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
-
-

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
new file mode 100644
index 0000000..d834592
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
@@ -0,0 +1,63 @@
+---
+title: Configuring Pulse Authentication
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Pulse requires all users to authenticate themselves before they can use the Pulse Web application.
+
+If you run Pulse in embedded mode, the Pulse application runs on the JMX Manager node and no JMX authentication is required. You do not need to specify valid JMX credentials to start an embedded Pulse application.
+
+If you host Pulse on a Web Application server (non-embedded mode) and you configure JMX authentication on the Geode manager node, then the Pulse Web application must authenticate itself with the manager node when it starts. Specify the credentials of a valid JMX user account in the `pulse.properties` file, as described in [Hosting Pulse on a Web Application Server](pulse-hosted.html).
+
+**Note:**
+The credentials that you specify must have both read and write privileges in the JMX Manager node. See [Configuring a JMX Manager](../../managing/management/jmx_manager_operations.html#topic_263072624B8D4CDBAD18B82E07AA44B6).
+
+# Configuring Pulse to Use HTTPS
+
+You can configure Pulse to use HTTPS in either embedded or non-embedded mode.
+
+In non-embedded mode where you are running Pulse on a standalone Web application server, you must use the Web server's SSL configuration to make the HTTP requests secure.
+
+In embedded mode, Geode uses an embedded Jetty server to host the
+Pulse Web application. To make the embedded server use HTTPS, you must
+enable the `http` SSL component in
+`gemfire.properties` or `gfsecurity.properties`.
+See [SSL](../../managing/security/ssl_overview.html) for details on configuring these parameters.
+
+These SSL parameters apply to all HTTP services hosted on the JMX Manager, which includes the following:
+
+-   Developer REST API service
+-   Management REST API service (for remote cluster management)
+-   Pulse monitoring tool
+
+When the `http` SSL component is enabled, all HTTP services become
+SSL-enabled and you must configure your client applications
+accordingly. For SSL-enabled Pulse, you will need to configure your
+browsers with proper certificates.
+
+If a JMX manager or locator is configured to use SSL, you can configure Pulse to connect to these
+processes. Create a file named `pulsesecurity.properties` and save it somewhere in the classpath of
+your Web application server. Include standard Java SSL properties, such as:
+
+```
+javax.net.ssl.keyStore={KeyStorePath}
+javax.net.ssl.keyStorePassword={KeyStorePassword}
+javax.net.ssl.trustStore={TrustStorePath}
+javax.net.ssl.trustStorePassword={TrustStorePassword}
+```
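For the embedded case above, the keystore settings live in the JMX Manager's `gemfire.properties` rather than in `pulsesecurity.properties`. A minimal sketch, assuming the `ssl-enabled-components` syntax described in the SSL chapter linked above (where the HTTP service channel is listed as `web`); the paths and passwords are placeholders, and the exact component names should be confirmed against that chapter for your release:

``` pre
# gemfire.properties on the JMX Manager (sketch only)
ssl-enabled-components=web,jmx
ssl-keystore=/path/to/keystore.jks
ssl-keystore-password=changeit
ssl-truststore=/path/to/truststore.jks
ssl-truststore-password=changeit
```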

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb b/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb
new file mode 100644
index 0000000..955e554
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-embedded.html.md.erb
@@ -0,0 +1,76 @@
+---
+title: Running Pulse in Embedded Mode (Quick Start)
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By
+default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse
+application. Optionally, configure Pulse to connect to a Geode system of your choice.
+
+To run Pulse in embedded mode:
+
+1.  Configure a Geode member to run as a JMX Manager node, specifying the HTTP port on which you
+will access the Pulse Web application (port 7070 by default). For example, the following command
+starts a Geode locator as a JMX Manager node, using the default HTTP port 7070 for the Pulse
+application:
+
+    ``` pre
+    gfsh
+    gfsh> start locator --name=loc1
+    ```
+
+    **Note:**
+    Geode locators become JMX Manager nodes by default. To start a non-locator member as a JMX
+    Manager node, include the `--J=-Dgemfire.jmx-manager=true` option. To specify a non-default port
+    number for the HTTP service that hosts the Pulse application, include the
+    `--J=-Dgemfire.http-service-port=port_number` option when starting the JMX Manager node.
+
+    When the JMX Manager node boots, it starts an embedded Jetty instance and deploys the Pulse Web
+    application at the specified HTTP port (7070 by default).
+
+    `gfsh` automatically connects to the manager when you start it in this way. If you already
+    started a manager process earlier, use the `connect` command in `gfsh` to connect to that
+    process.
+
+2.  Access the embedded Pulse application from a Web browser. If you are connected to the Geode
+cluster using gfsh, use the `start pulse` command to load the correct URL in your browser:
+
+    ``` pre
+    gfsh> start pulse
+    ```
+
+    Or, enter the URL http://*address*:*http-service-port*/pulse directly in your Web browser,
+    substituting the address and HTTP port of the manager. For example, you access Pulse on the
+    local locator machine from Step 1 at the URL http://localhost:7070/pulse.
+
+3.  If you have configured authentication for the Pulse application, enter the username and password
+of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both
+fields. Click **Sign In** to continue.
+
+    See [Configuring Pulse Authentication](pulse-auth.html).
+
+4.  After you log in, Pulse displays the main cluster view for the local distributed system. See
+[Using Pulse Views](pulse-views.html).
+
+**Note:**
+When running in embedded mode, the Pulse application connects only to the JMX Manager running in the
+locator or member that hosts Pulse. This enables you to monitor all members of that distributed
+system. You can also view (but not monitor) connected WAN clusters, and can view gateway senders and
+receivers that are configured in the local cluster.
+
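To make the note in step 1 concrete, the following gfsh sketch starts a cache server (rather than a locator) as a JMX Manager and moves the Pulse HTTP service to port 8080. The two `--J` properties are the ones named in the note; the member name and port number are illustrative, and depending on your configuration you may also want `--J=-Dgemfire.jmx-manager-start=true` so the manager starts immediately:

``` pre
gfsh>start server --name=server1 --J=-Dgemfire.jmx-manager=true --J=-Dgemfire.http-service-port=8080
gfsh>start pulse --url=http://localhost:8080/pulse
```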

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
new file mode 100644
index 0000000..ceed530
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
@@ -0,0 +1,89 @@
+---
+title: Hosting Pulse on a Web Application Server
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
+
+To host Pulse on a Web application server:
+
+1.  Set the `http-service-port` property to zero (`-Dgemfire.http-service-port=0`) when you start your Geode JMX Manager nodes. Setting this property to zero disables the embedded Web server for hosting the Pulse application.
+2.  Create a `pulse.properties` file somewhere in the classpath of your Web application server. For example, if you are hosting Pulse on Tomcat, create the `pulse.properties` file in the `$TOMCAT_SERVER/lib` directory.
+
+3.  Define the following configuration properties in the `pulse.properties` file:
+
+    <table>
+    <colgroup>
+    <col width="50%" />
+    <col width="50%" />
+    </colgroup>
+    <thead>
+    <tr class="header">
+    <th>Property</th>
+    <th>Description</th>
+    </tr>
+    </thead>
+    <tbody>
+    <tr class="odd">
+    <td><code class="ph codeph">pulse.useLocator</code></td>
+    <td>Specify &quot;true&quot; to configure Pulse to connect to a Geode Locator member, or &quot;false&quot; to connect directly to a JMX Manager.
+    <p>When Pulse connects to a Geode locator, the locator provides the address and port of an available JMX Manager to use for monitoring the distributed system. In most production deployments, you should connect Pulse to a locator instance; this allows Pulse to provide monitoring services using any available JMX Manager.</p>
+    <p>If you specify &quot;false,&quot; Pulse connects directly to a specific JMX Manager. If this manager is not available, the Pulse connection fails, even if another JMX Manager is available in the distributed system.</p></td>
+    </tr>
+    <tr class="even">
+    <td><code class="ph codeph">pulse.host</code></td>
+    <td>Specify the DNS name or IP address of the Geode locator or JMX Manager machine to which Pulse should connect. You specify either a locator or JMX Manager address depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.</td>
+    </tr>
+    <tr class="odd">
+    <td><code class="ph codeph">pulse.port</code></td>
+    <td>Specify the port number of the Geode locator or the HTTP port number of the JMX Manager to which Pulse should connect. You specify either a locator or JMX Manager port depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.
+    <p>If you configured <code class="ph codeph">pulse.useLocator=false</code>, then <code class="ph codeph">pulse.port</code> must correspond to the <code class="ph codeph">http-service-port</code> setting of the JMX Manager.</p></td>
+    </tr>
+    </tbody>
+    </table>
+
+    For example, with this configuration Pulse connects to the locator at locsrv.gemstone.com\[10334\] and accesses any available JMX Manager:
+
+    ``` pre
+    pulse.useLocator=true
+    pulse.host=locsrv.gemstone.com
+    pulse.port=10334
+    ```
+
+    With this configuration Pulse accesses only the JMX Manager instance at jmxsrv.gemstone.com\[8080\]:
+
+    ``` pre
+    pulse.useLocator=false
+    pulse.host=jmxsrv.gemstone.com
+    pulse.port=8080
+    ```
+
+4.  (Optional.) Configure authentication for the Pulse Web application using the instructions in [Configuring Pulse Authentication](pulse-auth.html).
+
+5.  Deploy the Pulse Web application to your application server. Geode installs the `pulse.war` file in the `tools/Pulse` subdirectory of your Geode installation directory. Depending on your application server, you may need to copy the `pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
+6.  Access the Pulse application using the address, port, and application URL that you configure in your Web application server. For example, with Tomcat the default URL is http://*address*:8080/pulse. Your application server provides options for configuring the address, port, and application name; substitute the correct items to access the deployed Pulse application.
+
+    Pulse connects to the locator or JMX Manager that you configured in the `pulse.properties` file, authenticating using the credentials that you configured in the file.
+
+7.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
+
+    See [Configuring Pulse Authentication](pulse-auth.html).
+
+8.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](pulse-views.html).
+
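For steps 2 and 5 on Tomcat specifically, the mechanics reduce to copying two files. A sketch assuming a standard Tomcat layout and a `GEODE_HOME` variable pointing at the Geode installation (both names are illustrative, not requirements of Pulse):

``` pre
# pulse.properties must be on the server classpath; pulse.war goes in the deployment directory
cp pulse.properties $TOMCAT_SERVER/lib/
cp $GEODE_HOME/tools/Pulse/pulse.war $TOMCAT_SERVER/webapps/
```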

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb b/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb
new file mode 100644
index 0000000..ec723d2
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-overview.html.md.erb
@@ -0,0 +1,49 @@
+---
+title:  Geode Pulse
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Geode Pulse is a Web Application that provides a graphical dashboard for monitoring vital, real-time health and performance of Geode clusters, members, and regions.
+
+Use Pulse to examine total memory, CPU, and disk space used by members, uptime statistics, client connections, WAN connections, and critical notifications. Pulse communicates with a Geode JMX manager to provide a complete view of your Geode deployment. You can drill down from a high-level cluster view to examine individual members and even regions within a member, to filter the type of information and level of detail.
+
+By default, Geode Pulse runs in an embedded container within a Geode JMX manager node. You can optionally deploy Pulse to a Web application server of your choice, so that the tool runs independently of your Geode clusters. Hosting Pulse on an application server also enables you to use SSL for accessing the application.
+
+-   **[Pulse System Requirements](pulse-requirements.html)**
+
+    Verify that your system meets the installation and runtime requirements for Pulse.
+
+
+-   **[Running Pulse in Embedded Mode (Quick Start)](pulse-embedded.html)**
+
+    Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse application. Optionally, configure Pulse to connect to a Geode system of your choice.
+
+-   **[Hosting Pulse on a Web Application Server](pulse-hosted.html)**
+
+    Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
+
+-   **[Configuring Pulse Authentication](pulse-auth.html)**
+
+    Pulse requires all users to authenticate themselves before they can use the Pulse Web application. If you have configured JMX authentication on the Geode JMX Manager node, the Pulse Web application itself may also need to authenticate itself to the Geode JMX Manager node on startup.
+
+-   **[Using Pulse Views](pulse-views.html)**
+
+    Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
+
+

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb b/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb
new file mode 100644
index 0000000..1163983
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-requirements.html.md.erb
@@ -0,0 +1,34 @@
+---
+title:  Pulse System Requirements
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Verify that your system meets the installation and runtime requirements for Pulse.
+
+The Pulse Web application has been tested for compatibility with the following Web browsers:
+
+-   Internet Explorer 9.0.8112.16421
+-   Safari 5.1.7 for Windows
+-   Google Chrome 22.0.1229.79 m
+-   Mozilla Firefox 16.0.1
+
+Pulse has been tested for standalone deployment on Tomcat and Jetty.
+Pulse may work with other operating systems and browsers upon which it has not been tested.
+
+

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/pulse-views.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-views.html.md.erb b/geode-docs/tools_modules/pulse/pulse-views.html.md.erb
new file mode 100644
index 0000000..d3bb367
--- /dev/null
+++ b/geode-docs/tools_modules/pulse/pulse-views.html.md.erb
@@ -0,0 +1,453 @@
+---
+title: Using Pulse Views
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
+
+The following sections provide an overview of the main Pulse views:
+
+-   [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8)
+-   [Member View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF)
+-   [Region View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D)
+-   [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser)
+-   [Alerts Widget](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn)
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8" class="no-quick-link"></a>Cluster View
+
+The cluster view is a high-level overview of the Geode distributed system. It is displayed immediately after you log into Pulse. Information displays around the perimeter of the cluster view show statistics such as memory usage, JVM pauses, and throughput. You can use the cluster view to drill down into details for individual members and regions in the distributed system.
+
+<img src="../../images/pulse_cluster_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_CC7B54903DF24030850E55965CDB6EC4" class="image imageleft" width="624" />
+
+Use these basic controls while in Cluster view:
+
+1.  Click Members or Data to display information about Geode members or data regions in the distributed system.
+2.  Click the display icons to display the Geode members using icon view, block view, or table view. Note that icon view is available only when displaying Members.
+
+    For example, the following shows Geode Members displayed in table view:
+
+    <img src="../../images/member_view_list.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_npw_sq3_wn" class="image" />
+    -   While in block view or table view, click the name of a Geode member to display additional information in the [Member View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).
+    -   Click Topology, Server Groups, or Redundancy Zones to filter the view based on all members in the topology, configured server groups, or configured redundancy zones.
+    The following shows Geode Regions displayed in table view:
+    <img src="../../images/pulse-region-detail.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_glp_1jr_54" class="image" />
+    -   While in block view or table view, click the name of a Geode region to display additional information in the [Region View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D).
+
+3.  While in icon view, click a host machine icon to display the Geode members on that machine.
+4.  In the Alerts pane, click the severity tabs to filter the message display by the level of severity.
+
+**Cluster View Screen Components**
+
+The following table describes the data pieces displayed on the Cluster View screen.
+
+<table>
+<colgroup>
+<col width="50%" />
+<col width="50%" />
+</colgroup>
+<thead>
+<tr class="header">
+<th>Screen Component</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td><strong>Cluster Status</strong></td>
+<td>Overall status of the distributed system being monitored. Possible statuses include Normal, Warning, or Severe.</td>
+</tr>
+<tr class="even">
+<td>Total Heap</td>
+<td>Total amount of memory (in GB) allocated to the Java heap across all members.</td>
+</tr>
+<tr class="odd">
+<td>Members</td>
+<td>Total number of members in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Servers</td>
+<td>Total number of servers in the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Clients</td>
+<td>Total number of clients in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Locators</td>
+<td>Total number of locators in the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Regions</td>
+<td>Total number of regions in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Functions</td>
+<td>Total number of functions registered in the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Unique CQs</td>
+<td>Total number of unique CQs. Corresponds to the UNIQUE_CQ_QUERY statistic.</td>
+</tr>
+<tr class="even">
+<td>Subscriptions</td>
+<td>Total number of client event subscriptions.</td>
+</tr>
+<tr class="odd">
+<td><strong>Cluster Members</strong></td>
+<td>Graphical, block, or table view of the members in the cluster.</td>
+</tr>
+<tr class="even">
+<td>Topology</td>
+<td>Organizes cluster members by DistributedMember Id.</td>
+</tr>
+<tr class="odd">
+<td>Server Groups</td>
+<td>Organizes cluster members by server group membership. If no server groups are configured, all members appear under the &quot;Default&quot; server group.</td>
+</tr>
+<tr class="even">
+<td>Redundancy Zones</td>
+<td>Organizes cluster members by redundancy zones. If no redundancy zones are configured, all members appear under the &quot;Default&quot; zone.</td>
+</tr>
+<tr class="odd">
+<td>Host Machine</td>
+<td>When you mouse over a machine icon in Topology View, a pop-up appears with the following machine statistics:
+<ul>
+<li><em>CPU Usage</em>. Percentage of CPU being used by Geode processes on the machine.</li>
+<li><em>Memory Usage</em>. Amount of memory (in MB) being used by Geode processes.</li>
+<li><em>Load Avg</em>. Average number of threads on the host machine that are in the run queue or are waiting for disk I/O over the last minute. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</li>
+<li><em>Sockets</em>. Number of sockets currently open on the machine.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td>Member</td>
+<td>When you mouse over a member icon in Graphical View, a pop-up appears with the following member statistics:
+<ul>
+<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
+<li><em>Threads</em>. Number of threads running on the member.</li>
+<li><em>JVM Pauses</em>. Number of times the JVM used by the member process has paused due to garbage collection or excessive CPU usage.</li>
+<li><em>Regions</em>. Number of regions hosted on the member process.</li>
+<li><em>Clients</em>. Number of clients currently connected to the member process.</li>
+<li><em>Gateway Sender</em>. Number of gateway senders configured on the member.</li>
+<li><em>Port</em>. Server port of the cache server member where clients can connect and perform cache operations.</li>
+<li><em>GemFire Version</em>. The version of the Geode member.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td>Member</td>
+<td>In List View, the following data fields are displayed for each member:
+<ul>
+<li><em>ID</em>. DistributedMember Id of the member.</li>
+<li><em>Name</em>. Name of the member.</li>
+<li><em>Host</em>. Hostname or IP address where the member is running.</li>
+<li><em>Heap Usage</em>. Amount of JVM heap memory being used by the member process.</li>
+<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
+<li><em>Uptime</em>. How long the member has been up and running.</li>
+<li><em>Clients</em>. Number of clients currently connected to the member. It will have a value only if the member acts as a CacheServer.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td><strong>Key Statistics</strong></td>
+<td>Displays a few key performance measurements of the distributed system (over the last 15 minutes).</td>
+</tr>
+<tr class="odd">
+<td>Write/Sec</td>
+<td>Number of write operations per second that have occurred across the cluster. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
+</tr>
+<tr class="even">
+<td>Read/Sec</td>
+<td>Number of read operations per second that have occurred across the cluster.</td>
+</tr>
+<tr class="odd">
+<td>Queries/Sec</td>
+<td>Number of queries per second that have been executed across the cluster.</td>
+</tr>
+<tr class="even">
+<td><strong>No. of JVM Pauses</strong></td>
+<td>Number of times the JVM has paused during the last five minutes to perform garbage collection.</td>
+</tr>
+<tr class="odd">
+<td><strong>WAN Information</strong></td>
+<td>If you have configured gateway senders or receivers for a multi-site (WAN) deployment, this box displays whether the remote cluster is reachable (working connectivity represented by a green triangle).</td>
+</tr>
+<tr class="even">
+<td><strong>Disk Throughput</strong></td>
+<td>Total disk throughput for all disks in cluster.</td>
+</tr>
+<tr class="odd">
+<td><strong>Alerts View</strong></td>
+<td>Displays alerts for the cluster.</td>
+</tr>
+</tbody>
+</table>
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF" class="no-quick-link"></a>Member View
+
+When you select an individual Geode member in Cluster View, Pulse displays the regions available on that member, as well as member-specific information such as the configured listen ports.
+
+<img src="../../images/pulse_member_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_EDBD3D333B2741DCAA5CB94719B507B7" class="image imageleft" width="624" />
+
+Use these basic controls while in Member View:
+
+1.  Click the display icons to display regions using block view or table view.
+2.  Use the drop down menu to select a specific member or search for specific members by name.
+3.  Click **Cluster View** to return to Cluster View. See [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
+4.  Click **Data Browser** to query region data. See [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
+
+**Member View Screen Components**
+
+The following table describes the data elements displayed on the Member View screen.
+
+<table>
+<colgroup>
+<col width="50%" />
+<col width="50%" />
+</colgroup>
+<thead>
+<tr class="header">
+<th>Screen Component</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td><strong>Member Status</strong></td>
+<td>Overall status of the member being monitored. Possible statuses include Normal, Warning, or Severe.</td>
+</tr>
+<tr class="even">
+<td>Regions</td>
+<td>Total number of regions hosted on the member.</td>
+</tr>
+<tr class="odd">
+<td>Threads</td>
+<td>Total number of threads being executed on the member.</td>
+</tr>
+<tr class="even">
+<td>Sockets</td>
+<td>Total number of sockets currently open on the member.</td>
+</tr>
+<tr class="odd">
+<td>Load Avg.</td>
+<td>Average number of threads on the member that are in the run queue or are waiting for disk I/O over the last minute. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</td>
+</tr>
+<tr class="even">
+<td>Clients</td>
+<td>Current number of client connections to the member.</td>
+</tr>
+<tr class="odd">
+<td><strong>Member Regions</strong></td>
+<td>Block or table view of the regions hosted on the member.</td>
+</tr>
+<tr class="even">
+<td>Regions</td>
+<td>When you mouse over a region in block view, a pop-up appears with the following data fields:
+<ul>
+<li><em>Name</em>. Region name.</li>
+<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
+<li><em>EntryCount</em>. Number of entries in the region.</li>
+<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td>Regions</td>
+<td>In table view, the following fields are listed for each region:
+<ul>
+<li><em>Name</em>. Region name.</li>
+<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
+<li><em>EntryCount</em>. Number of entries in the region.</li>
+<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
+<li><em>Scope</em>. Scope configured for the region.</li>
+<li><em>Disk Store Name</em>. Name of disk stores (if any) associated with the region.</li>
+<li><em>Disk Synchronous</em>. True if writes to disk are set to synchronous and false if not. This field reflects the configured disk-synchronous region attribute.</li>
+<li><em>Gateway Enabled</em>. Whether gateway sender and receiver configurations have been defined on members hosting this region.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td><strong>Member Clients</strong></td>
+<td>In table view, the following fields are listed for each client:
+<ul>
+<li><em>Id</em>. DistributedMember ID of the client process.</li>
+<li><em>Name</em>. Name of the client process.</li>
+<li><em>Host</em>. Hostname or IP address of the client process.</li>
+<li><em>Connected</em>. Whether the client process is currently connected to the member.</li>
+<li><em>Queue Size</em>. The size of the queue that the server uses to send events to a subscription-enabled client or to a client that has continuous queries running on the server.</li>
+<li><em>CPU Usage</em>. Percentage of CPU being used by the client process.</li>
+<li><em>Uptime</em>. Amount of time the client process has been running.</li>
+<li><em>Threads</em>. Threads being used by the member's clients.</li>
+<li><em>Gets</em>. Total number of successful get requests completed.</li>
+<li><em>Puts</em>. Total number of successful put requests completed.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td><strong>Key Statistics</strong></td>
+<td>Displays a few key performance measurements for the member (over the last 15 minutes).</td>
+</tr>
+<tr class="even">
+<td>% CPU Usage</td>
+<td>Percentage of CPU used by the member.</td>
+</tr>
+<tr class="odd">
+<td>Read/Sec</td>
+<td>Number of read operations per second that have occurred on the member.</td>
+</tr>
+<tr class="even">
+<td>Write/Sec</td>
+<td>Number of write operations per second that have occurred on the member. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
+</tr>
+<tr class="odd">
+<td><strong>Memory Usage</strong></td>
+<td>Total memory used on the member in MB.</td>
+</tr>
+<tr class="even">
+<td><strong>No. of JVM Pauses</strong></td>
+<td>Number of times the JVM has paused during the last five minutes due to garbage collection or excessive CPU usage.</td>
+</tr>
+<tr class="odd">
+<td><strong>WAN Information</strong></td>
+<td>Displays cluster information. This dialog box only appears if you have configured WAN functionality (gateway senders and gateway receivers).</td>
+</tr>
+<tr class="even">
+<td><strong>Disk Throughput</strong></td>
+<td>Rate of disk writes on the member.</td>
+</tr>
+</tbody>
+</table>
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D" class="no-quick-link"></a>Region View
+
+The Pulse Region View provides a comprehensive overview of all regions in the Geode distributed system:
+
+<img src="../../images/pulse_data_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_A533852E38654E79BE5628E938E170EB" class="image imageleft" width="624" />
+
+Use these basic controls while in Region View:
+
+1.  Click the display icons to display all members that host the region using block view or table view.
+
+    (Click the name of a member to change to that member's [Member View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).)
+
+2.  Search for specific members that host the current region.
+3.  Hover over a member name to display information such as the region entry count, entry size, and throughput on that member.
+4.  Click [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8) or [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser) to go to those screens.
+
+**Region View Screen Components**
+
+The following table describes the data elements displayed on the Region View screen.
+
+<table>
+<colgroup>
+<col width="50%" />
+<col width="50%" />
+</colgroup>
+<thead>
+<tr class="header">
+<th>Screen Component</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td><strong>Region Members</strong></td>
+<td>Lists information about Geode members that host the region, either in block view or table view.</td>
+</tr>
+<tr class="even">
+<td>Region Member (Detail View)</td>
+<td>When you hover over a region member in block view, a pop-up appears with the following data fields:
+<ul>
+<li><em>Member Name</em>. The name of the Geode member hosting the region.</li>
+<li><em>EntryCount</em>. Number of entries for the region on that member.</li>
+<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries on that member. For replicated regions this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
+<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
+<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
+</ul></td>
+</tr>
+<tr class="odd">
+<td>Region Member (Table View)</td>
+<td>In table view, the following fields are listed for each region member:
+<ul>
+<li><em>ID</em>. The unique member ID.</li>
+<li><em>Name</em>. Region name.</li>
+<li><em>Host</em>. Member hostname.</li>
+<li><em>Heap Usage</em>. The total amount of heap used on the member in MB.</li>
+<li><em>CPU Usage</em>. CPU usage as a percent of available CPU.</li>
+<li><em>Uptime</em>. The amount of time elapsed since the member started.</li>
+<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
+</ul></td>
+</tr>
+<tr class="even">
+<td><strong>Region Detail</strong></td>
+<td>When you have selected a region, the right hand pane displays the following information about the region:
+<ul>
+<li><em>Name</em>. Name of the region.</li>
+<li><em>Region Path</em>. Path for the region.</li>
+<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
+<li><em>Members</em>. Number of members that are hosting the region.</li>
+<li><em>Empty Nodes</em>. Nodes where the region DataPolicy is defined as EMPTY or where LocalMaxMemory is set to 0.</li>
+<li><em>Entry Count</em>. Total number of entries in the region.</li>
+<li><em>Disk Usage</em>. Persistent data usage.</li>
+<li><em>Persistence</em>. Indicates whether the region's data is persisted to disk.</li>
+<li><em>Memory Usage</em>. The amount of memory used and total available memory (also shown as a percentage).</li>
+<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
+</ul></td>
+</tr>
+</tbody>
+</table>
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser" class="no-quick-link"></a>Data Browser
+
+The Pulse Data Browser enables you to query region data. Note that there are two key attributes available on DistributedSystemMXBean (see [List of Geode JMX MBeans](../../managing/management/list_of_mbeans.html#topic_4BCF867697C3456D96066BAD7F39FC8B)) that you can use to configure limits for the result sets displayed in Data Browser:
+
+-   `QueryResultSetLimit` limits the number of rows that Data Browser queries return. By default, 1000 rows are displayed.
+-   `QueryCollectionsDepth` limits the number of elements of a collection that Data Browser queries return. This attribute applies to query results that contain collections such as Map, List, and so forth. The default value is 100 elements.
+
+See the `org.apache.geode.management.DistributedSystemMXBean` JavaDocs for information on available MBean methods and attributes.
+
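+For illustration only, the following sketch shows one way a Java JMX client might adjust these two attributes before you open Data Browser. The connection URL, the port, and the `GemFire:service=System,type=Distributed` ObjectName are assumptions for this example and may differ in your deployment; adjust them to match your JMX Manager.
+
+``` pre
+import javax.management.Attribute;
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+
+public class DataBrowserLimits {
+  public static void main(String[] args) throws Exception {
+    // Assumed JMX Manager endpoint; substitute your own host and port.
+    JMXServiceURL url =
+        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
+    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
+      MBeanServerConnection mbs = connector.getMBeanServerConnection();
+      // Assumed ObjectName of the DistributedSystemMXBean.
+      ObjectName system = new ObjectName("GemFire:service=System,type=Distributed");
+      // Cap Data Browser result sets at 500 rows and collections at 50 elements.
+      mbs.setAttribute(system, new Attribute("QueryResultSetLimit", 500));
+      mbs.setAttribute(system, new Attribute("QueryCollectionsDepth", 50));
+      System.out.println(mbs.getAttribute(system, "QueryResultSetLimit"));
+    }
+  }
+}
+```
+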
+The following shows an example Data Browser view:
+
+<img src="../../images/pulse-data-browser.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_hhp_dz2_44" class="image imageleft" width="624" />
+
+Use these basic controls while in Data Browser view:
+
+1.  Search for the name of a specific region.
+2.  Select one or more regions to display the Geode members that host those regions. The hosting Geode members appear in the Region Members section.
+3.  Select one or more members from the Region Members section to restrict query results to those members.
+4.  Type in the text of a query to execute (a sample query appears after this list). See [Querying](../../developing/querying_basics/chapter_overview.html).
+5.  Display a list of previously-executed queries. Double-click on a query from the history list to copy it to the Query Editor, or delete the query from your history.
+6.  Execute your query or clear the contents of the Query Editor.
+7.  View the current query results.
+8.  Export the query results to a text file.
+9.  Return to [Cluster View](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
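+
+As an illustration, a Data Browser query is an OQL statement such as the following. The region name `/Customers` and its fields are hypothetical placeholders; substitute a region and fields from your own deployment:
+
+``` pre
+SELECT c.id, c.name
+FROM /Customers c
+WHERE c.status = 'active'
+LIMIT 20
+```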
+
+# <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn" class="no-quick-link"></a>Alerts Widget
+
+The Alerts Widget appears in the right portion of the screen and displays a list of alerts.
+
+The alerts displayed for the cluster are based on the alertLevel attribute set in the DistributedSystemMXBean. By default, log messages with the level of SEVERE are shown as alerts. You can modify the level by using the `DistributedSystemMXBean.changeAlertLevel` method. See [System Alert Notifications](../../managing/management/notification_federation_and_alerts.html#topic_212EE5A2ABAB4E8E8EF71807C9ECEF1A__section_7463D13112D54406953416356835E290) for more information.
+
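+As a sketch only, the alert level can also be changed from a Java JMX client by invoking `changeAlertLevel` on the DistributedSystemMXBean. The endpoint, the `GemFire:service=System,type=Distributed` ObjectName, and the `"warning"` level value are assumptions for this example:
+
+``` pre
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
+
+public class AlertLevel {
+  public static void main(String[] args) throws Exception {
+    // Assumed JMX Manager endpoint; substitute your own host and port.
+    JMXServiceURL url =
+        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
+    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
+      MBeanServerConnection mbs = connector.getMBeanServerConnection();
+      ObjectName system = new ObjectName("GemFire:service=System,type=Distributed");
+      // Show WARNING-and-above log messages as Pulse alerts instead of only SEVERE.
+      mbs.invoke(system, "changeAlertLevel",
+          new Object[] {"warning"}, new String[] {"java.lang.String"});
+    }
+  }
+}
+```
+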
+<img src="../../images/pulse_alerts_widget.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_jrc_smt_qn" class="image" />
+
+Use these basic controls in the Alerts Widget:
+
+1.  Select an alert level to view only alerts with a specific severity.
+2.  Enter text in the search box to filter the list of alerts.
+3.  Select an alert and click Clear to remove it from the alert list.
+4.  Click **Clear All** to remove all alerts from the widget.
+5.  Double-click an alert to open a pop-up window that displays the full text of the alert message.
+6.  Click the check mark in an alert pop-up window to acknowledge the alert. Acknowledged alerts display a check mark in the list of alerts.
+7.  Triple-click the alert in the pop-up or in the alert list to select the message text. You can then copy and paste the text into another application.
+8.  Click the **X** to close the pop-up alert window.
+


[12/32] geode git commit: GEODE-2941 Update Pulse documentation

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/quickstart.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/quickstart.html.md.erb b/geode-docs/tools_modules/pulse/quickstart.html.md.erb
deleted file mode 100644
index 6bcf1dc..0000000
--- a/geode-docs/tools_modules/pulse/quickstart.html.md.erb
+++ /dev/null
@@ -1,827 +0,0 @@
----
-title: Pulse Quick Start (Embedded Mode)
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<a id="topic_523F6DE33FE54307BBE8F83BB7D9355D"></a>
-
-
-Use Pulse in embedded mode to monitor a Geode deployment directly from a Geode JMX Manager. By default, the embedded Pulse application connects to the local JMX Manager that hosts the Pulse application. Optionally, configure Pulse to connect to a Geode system of your choice.
-
-To run Pulse in embedded mode:
-
-1.  Configure a Geode member to run as a JMX Manager node, specifying the HTTP port on which you will access the Pulse Web application (port 7070 by default). For example, the following command starts a Geode locator as a JMX Manager node, using the default HTTP port 7070 for the Pulse application:
-
-    ``` pre
-    gfsh
-    gfsh> start locator --name=loc1
-    ```
-
-    **Note:**
-    Geode locators become JMX Manager nodes by default. To start a non-locator member as a JMX Manager node, include the `--J=-Dgemfire.jmx-manager=true` option. To specify a non-default port number for the HTTP service that hosts the Pulse application, include the `--J=-Dgemfire.http-service-port=port_number` option when starting the JMX Manager node.
-
-    When the JMX Manager node boots, it starts an embedded Jetty instance and deploys the Pulse Web application at the specified or default HTTP port or 7070 by default.
-
-    `gfsh` automatically connects to the manager when you start it in this way. If you already started a manager process earlier, use the `connect` command in `gfsh` to connect to that process.
-
-2.  Access the embedded Pulse application from a Web browser. If you are connected to the Geode cluster using gfsh, use the `start pulse` command to load the correct URL in your browser:
-
-    ``` pre
-    gfsh> start pulse
-    ```
-
-    Or, enter the URL http://*address*:*http-service-port*/pulse directly in your Web browser, substituting the address and HTTP port of the manager. For example, you access Pulse on the local locator machine from Step 1 at the URL http://localhost:7070/pulse.
-
-3.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
-
-    See [Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC).
-
-4.  After you log in, Pulse displays the main cluster view for the local distributed system. See [Using Pulse Views](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404).
-
-**Note:**
-When running in embedded mode, the Pulse application connects only to the JMX Manager running in the locator or member that hosts Pulse. This enables you to monitor all members of that distributed system. You can also view (but not monitor) connected WAN clusters, and can view gateway senders and receivers that are configured in the local cluster.
-
-## <a id="topic_795C97B46B9843528961A094EE520782" class="no-quick-link"></a>Hosting Pulse on a Web Application Server
-
-Host Pulse on a dedicated Web application server to make the Pulse application available at a consistent address, or to use SSL for accessing the Pulse application. When you host Pulse in this way, you also configure Pulse to connect to a specific locator or JMX Manager node for monitoring.
-
-To host Pulse on a Web application server:
-
-1.  Set the `http-service-port` property to zero (`-Dgemfire.http-service-port=0`) when you start your Geode JMX Manager nodes. Setting this property to zero disables the embedded Web server for hosting the Pulse application.
-2.  Create a `pulse.properties` file somewhere in the classpath of your Web application server. For example, if you are hosting Pulse on Tomcat, create the `pulse.properties` file in the `$TOMCAT_SERVER/lib` directory.
-
-3.  Define the following configuration properties in the `pulse.properties` file:
-
-    <table>
-    <colgroup>
-    <col width="50%" />
-    <col width="50%" />
-    </colgroup>
-    <thead>
-    <tr class="header">
-    <th>Property</th>
-    <th>Description</th>
-    </tr>
-    </thead>
-    <tbody>
-    <tr class="odd">
-    <td><code class="ph codeph">pulse.useLocator</code></td>
-    <td>Specify &quot;true&quot; to configure Pulse to connect to a Geode Locator member, or &quot;false&quot; to connect directly to a JMX Manager.
-    <p>When Pulse connects to a Geode locator, the locator provides the address and port of an available JMX Manager to use for monitoring the distributed system. In most production deployments, you should connect Pulse to a locator instance; this allows Pulse to provide monitoring services using any available JMX Manager.</p>
-    <p>If you specify &quot;false,&quot; Pulse connects directly to a specific JMX Manager. If this manager is not available, the Pulse connection fails, even if another JMX Manager is available in the distributed system.</p></td>
-    </tr>
-    <tr class="even">
-    <td><code class="ph codeph">pulse.host</code></td>
-    <td>Specify the DNS name or IP address of the Geode locator or JMX Manager machine to which Pulse should connect. You specify either a locator or JMX Manager address depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.</td>
-    </tr>
-    <tr class="odd">
-    <td><code class="ph codeph">pulse.port</code></td>
-    <td>Specify the port number of the Geode locator or the HTTP port number of the JMX Manager to which Pulse should connect. You specify either a locator or JMX Manager port depending on how you configured the <code class="ph codeph">pulse.useLocator</code> property.
-    <p>If you configured <code class="ph codeph">pulse.useLocator=false</code>, then <code class="ph codeph">pulse.port</code> must correspond to the <code class="ph codeph">http-service-port</code> setting of the JMX Manager.</p></td>
-    </tr>
-    <tr class="even">
-    <td><code class="ph codeph">pulse.jmxUserName</code></td>
-    <td>If you configured authentication for the Geode JMX Manager node, specify a valid JMX user name that the Pulse application will use to authenticate to the JMX Manager.
-    <div class="note note">
-    **Note:**
-    <p>The JMX account that Pulse uses must have both read and write privileges.</p>
-    </div>
-    <p>See <a href="../../managing/management/jmx_manager_operations.html#topic_263072624B8D4CDBAD18B82E07AA44B6">Configuring a JMX Manager</a> for information about configuring authentication for JMX Manager nodes.</p></td>
-    </tr>
-    <tr class="odd">
-    <td><code class="ph codeph">pulse.jmxUserPassword</code></td>
-    <td>Specify the password of the JMX user account to use for authentication at startup.</td>
-    </tr>
-    </tbody>
-    </table>
-
-    For example, with this configuration Pulse connects to the locator at mylocator\[10334\] and accesses any available JMX Manager:
-
-    ``` pre
-    pulse.useLocator=true
-    pulse.host=locsrv.gemstone.com
-    pulse.port=10334
-    pulse.jmxUserName=pulseapp
-    pulse.jmxUserPassword=pulsepass
-    ```
-
-    With this configuration Pulse accesses only the JMX Manager instance at manager1\[8080\]:
-
-    ``` pre
-    pulse.useLocator=false
-    pulse.host=jmxsrv.gemstone.com
-    pulse.port=8080
-    pulse.jmxUserName=pulseapp
-    pulse.jmxUserPassword=pulsepass
-    ```
-
-4.  (Optional.) Configure authentication for the Pulse Web application using the instructions in [Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC).
-5.  Deploy the Pulse Web application to your application server. Geode installs the `pulse.war` file in the `tools/Pulse` subdirectory of your Geode installation directory. Depending on your application server, you may need to copy the `pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
-6.  Access the Pulse application using the address, port, and application URL that you configure in your Web application server. For example, with Tomcat the default URL is http://*address*:8080/pulse. Your application server provides options for configuring the address, port, and application name; substitute the correct items to access the deployed Pulse application.
-
-    Pulse connects to the locator or JMX Manager that you configured in the `pulse.properties` file, authenticating using the credentials that you configured in the file.
-
-7.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
-
-    See [Configuring Pulse Authentication](quickstart.html#topic_AC9FFAA6FB044279BAED7A3E099E07AC).
-
-8.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404).
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC" class="no-quick-link"></a>Configuring Pulse Authentication
-
-Pulse requires all users to authenticate themselves before they can use the Pulse Web application. If you have configured JMX authentication on the Geode JMX Manager node, the Pulse Web application itself may also need to authenticate itself to the Geode JMX Manager node on startup.
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_D31C25130C3D470083DAC76AE64DD1B6" class="no-quick-link"></a>Authenticating the Pulse Application to the JMX Manager
-
-If you run Pulse in embedded mode, the Pulse application runs on the JMX Manager node and no JMX authentication is required. You do not need to specify valid JMX credentials to start an embedded Pulse application.
-
-If you host Pulse on a Web Application server (non-embedded mode) and you configure JMX authentication on the Geode manager node, then the Pulse Web application must authenticate itself with the manager node when it starts. Specify the credentials of a valid JMX user account in the `pulse.properties` file, as described in [Hosting Pulse on a Web Application Server](quickstart.html#topic_795C97B46B9843528961A094EE520782).
-
-**Note:**
-The credentials that you specify must have both read and write privileges in the JMX Manager node. See [Configuring a JMX Manager](../../managing/management/jmx_manager_operations.html#topic_263072624B8D4CDBAD18B82E07AA44B6).
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_E3703ED899354839BE51278D3AE79062" class="no-quick-link"></a>Authenticating Pulse Users
-
-Pulse implements user authentication using the Spring security framework. The authentication configuration is specified in the `spring-security.xml` file, which is stored in the `WEB-INF` directory of Pulse WAR file. The `spring-security.xml` file contains bean definitions for role-based resource access, authentication profiles, and authentication handlers. The file also contains a default authentication manager bean definition.
-
-Pulse uses a profile-based authentication configuration. You can can choose to use either the default configuration profile or a custom configuration. The default profile uses the Spring security simple in-memory User Details Service to define a single user with the credentials:
-
-|            |            |
-|------------|------------|
-| User Name: | admin      |
-| Password:  | admin      |
-| Role:      | ROLE\_USER |
-
-Pulse uses this default authentication profile if you do not specify a profile when starting the application, or if you specify the default profile at startup using the system property:
-
-``` pre
--Dspring.profiles.active=pulse.authentication.default
-```
-
-You can also configure Pulse to use a custom authentication configuration by specifying activating the custom profile at startup with the system property:
-
-``` pre
--Dspring.profiles.active=pulse.authentication.custom
-```
-
-Using a custom configuration enables you to use either the simple in-memory User Details Service or an external properties file to authenticate users to the application. Even if you choose to use the default Spring security simple in-memory User Details Service, using a custom authentication configuration enables you to define your own user credentials rather than using the default "admin" account.
-
-**Note:**
-Geode also supports using an LDAP provider for Pulse authentication. See [Using LDAP Authentication in Pulse](#topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_kjx_ylq_kq)
-
-To configure and use a custom authentication configuration:
-
-1.  Create a directory in which you will store the custom authentication configuration. For example:
-
-    ``` pre
-    $ mkdir /opt/pulse-config
-    ```
-
-2.  Ensure that the new directory you created is available on the Java CLASSPATH:
-
-    ``` pre
-    $ export CLASSPATH=$CLASSPATH:/opt/pulse-config
-    ```
-
-3.  Create a new text file named `pulse-authentication-custom.xml` in the new directory:
-
-    ``` pre
-    $ touch /opt/pulse-config/pulse-authentication-custom.xml
-    ```
-
-4.  Use a text editor to add the bean definitions for the authentication managers and providers that you want to use. The following listings show the example file contents for using the in-memory User Details Service and an external properties file:
-
-    **Example pulse-authentication-custom.xml for Spring simple in-memory User Details Service**
-
-    ``` pre
-    <beans:beans >
-      <authentication-manager>
-        <authentication-provider>
-          <user-service id="userDetailsService">
-            <user name="john" password="johnspassword" authorities="ROLE_USER " />
-            <user name="bob" password="bobspassword" authorities="ROLE_USER" />
-          </user-service>
-        </authentication-provider>
-      </authentication-manager>
-    </beans:beans>
-    ```
-
-    **Example pulse-authentication-custom.xml for external properties file**
-
-    ``` pre
-    <beans:beans >
-      <authentication-manager>
-        <authentication-provider>
-          <user-service properties="classpath:pulse-users.properties">
-          </user-service>
-        </authentication-provider>
-      </authentication-manager>
-    </beans:beans>
-    ```
-
-    With file-based authentication mechanism, you define the names and passwords for valid Pulse users in a `pulse-users.properties` file, which must be available in the classpath of the Pulse application. Each line in the `pulse-users.properties` file defines the username, password, and access level for a Pulse user with the format:
-
-    ``` pre
-    username=password,role,{enabled | disabled}
-    ```
-
-    The *role* entry must correspond to a valid Spring security role. For example, this entry shows the default "admin" user enabled with basic user access:
-
-    ``` pre
-    admin=admin,ROLE_USER,enabled
-    ```
-
-5.  When you start Geode members, specify the custom authentication profile using the `-Dspring.profiles.active=pulse.authentication.custom` system property. For example:
-
-    ``` pre
-    gfsh> start server --name=server1 --J=-Dspring.profiles.active=pulse.authentication.custom
-    ```
-
-6.  Start Pulse and log in using credentials that are authorized in the custom configuration.
-
-## <a id="topic_AC9FFAA6FB044279BAED7A3E099E07AC__section_kjx_ylq_kq" class="no-quick-link"></a>Using LDAP Authentication in Pulse
-
-This section provides instructions for using LDAP authentication with Pulse in either embedded and non-embedded mode.
-
-**Embedded Mode (Jetty)**
-
-To configure LDAP for Pulse:
-
-1.  Create a directory in which you will store the LDAP authentication configuration. For example:
-
-    ``` pre
-    $ mkdir /opt/pulse-config
-    ```
-
-    The directory name and location are up to you-- just make sure you use the same name when specifying the CLASSPATH for the Geode JMX Manager process.
-
-2.  Create a file named `pulse-authentication-custom.xml` with contents similar to the following and place it under the directory you created in step 1. For example:
-
-    ``` pre
-    <beans:beans xmlns="http://www.springframework.org/schema/security"
-        xmlns:beans="http://www.springframework.org/schema/beans"
-        xmlns:context="http://www.springframework.org/schema/context"
-        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans-3.2.xsd
-                   http://www.springframework.org/schema/security
-                   http://www.springframework.org/schema/security/spring-security-3.1.xsd
-                   http://www.springframework.org/schema/context
-        http://www.springframework.org/schema/context/spring-context-3.2.xsd">
-        
-            <ldap-server url="ldap://ldap.gemstone.com:389/dc=gemstone,dc=com" />
-
-            <authentication-manager>
-               <ldap-authentication-provider user-dn-pattern="uid={0},ou=ldapTesting"  
-                group-search-base="cn=PULSEUSER,ou=Groups" group-search-filter="memberUid={1}">
-               </ldap-authentication-provider>
-           </authentication-manager>
-     
-    </beans:beans>
-    ```
-
-    LDAP authentication in Pulse is hardcoded to use the PULSEUSER user group. Make sure you have have created users for this group.
-
-3.  When starting the JMX Manager from gfsh, use the following commands:
-
-    ``` pre
-    gfsh>start locator --name=loc --J=-Dspring.profiles.active=pulse.authentication.custom --classpath=/opt/pulse-config
-    ```
-
-    or
-
-    ``` pre
-    gfsh>start server --name=server1 --J=-Dspring.profiles.active=pulse.authentication.custom --classpath=/opt/pulse-config
-    ```
-
-4.  Start Pulse and log in using credentials that are authorized in the LDAP configuration.
-
-**Non-Embedded (Standalone Web Server) Mode (Tomcat)**
-
-To configure LDAP for Pulse:
-
-1.  Create a directory in which you will store the LDAP authentication configuration. For example:
-
-    ``` pre
-    $ mkdir /opt/pulse-config
-    ```
-
-2.  The directory name and location of the Pulse configuration files are up to you-- just make sure you use the same name when specifying the CLASSPATH for the Tomcat server.
-3.  Pass in the Spring profile when starting the web server. In Tomcat, all the VM arguments are set in the variable CATALINA\_OPTS, which you can define in your environment configuration file setenv.bat or setenv.sh.
-
-    For example, under %CATALINA\_HOME%/bin or $CATALINA\_HOME/bin, you can create a setenv batch file or script file (if not already present) that sets the following. On Windows:
-
-    ``` pre
-    set CATALINA_OPTS=-Dspring.profiles.active=pulse.authentication.custom
-    set "CLASSPATH=C:\pulse-config"
-    ```
-
-    or in Unix/Linux:
-
-    ``` pre
-    CATALINA_OPTS=-Dspring.profiles.active=pulse.authentication.custom
-    export CATALINA_OPTS
-    CLASSPATH=$CLASSPATH:/opt/pulse-config
-    ```
-
-4.  Create a file named `pulse-authentication-custom.xml` with contents similar to the following and place it under the directory you created in step 1. For example:
-
-    ``` pre
-    <beans:beans xmlns="http://www.springframework.org/schema/security"
-        xmlns:beans="http://www.springframework.org/schema/beans"
-        xmlns:context="http://www.springframework.org/schema/context"
-        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans-3.2.xsd
-                   http://www.springframework.org/schema/security
-                   http://www.springframework.org/schema/security/spring-security-3.1.xsd
-                   http://www.springframework.org/schema/context
-        http://www.springframework.org/schema/context/spring-context-3.2.xsd">
-        
-            <ldap-server url="ldap://ldap.gemstone.com:389/dc=gemstone,dc=com" />
-
-            <authentication-manager>
-               <ldap-authentication-provider user-dn-pattern="uid={0},ou=ldapTesting"  
-                group-search-base="cn=PULSEUSER,ou=Groups" group-search-filter="memberUid={1}">
-               </ldap-authentication-provider>
-           </authentication-manager>
-     
-    </beans:beans>
-    ```
-
-    LDAP authentication in Pulse is hardcoded to use the PULSEUSER user group. Make sure you have have created users for this group.
-
-5.  Deploy the application and start the server.
-
-## Configuring Pulse to Use HTTPS
-
-You can configure Pulse to use HTTPS in either embedded or non-embedded mode.
-
-In non-embedded mode where you are running Pulse on a standalone Web application server, you must use the Web server's SSL configuration to make the HTTP requests secure.
-
-In embedded mode, Geode uses an embedded Jetty server to host the
-Pulse Web application. To make the embedded server use HTTPS, you must
-enable the `http` SSL component in
-`gemfire.properties` or `gfsecurity-properties`.
-See [SSL](../../managing/security/ssl_overview.html) for details on configuring these parameters.
-
-These SSL parameters apply to all HTTP services hosted on the JMX Manager, which includes the following:
-
--   Developer REST API service
--   Management REST API service (for remote cluster management)
--   Pulse monitoring tool
-
-When the `http` SSL component is enabled, all HTTP services become
-SSL-enabled and you must configure your client applications
-accordingly. For SSL-enabled Pulse, you will need to configure your
-browsers with proper certificates.
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404" class="no-quick-link"></a>Using Pulse Views
-
-Pulse provides a variety of different views to help you monitor Geode clusters, members, and regions.
-
-The following sections provide an overview of the main Pulse views:
-
--   [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8)
--   [Member View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF)
--   [Region View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D)
--   [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser)
--   [Alerts Widget](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn)
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8" class="no-quick-link"></a>Cluster View
-
-The cluster view is a high-level overview of the Geode distributed system. It is displayed immediately after you log into Pulse. Information displays around the perimeter of the cluster view show statistics such as memory usage, JVM pauses, and throughput. You can use the cluster view to drill down into details for individual members and regions in the distributed system.
-
-<img src="../../images/pulse_cluster_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_CC7B54903DF24030850E55965CDB6EC4" class="image imageleft" width="624" />
-
-Use these basic controls while in Cluster view:
-
-1.  Click Members or Data to display information about Geode members or data regions in the distributed system.
-2.  Click the display icons to display the Geode members using icon view, block view, or table view. Note that icon view is available only when displaying Members.
-
-    For example, the following shows Geode Members displayed in table view:
-
-    <img src="../../images/member_view_list.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_npw_sq3_wn" class="image" />
-    -   While in block view or table view, click the name of a Geode member to display additional information in the [Member View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).
-    -   Click Topology, Server Groups, or Redundancy Zones to filter the view based on all members in the topology, configured server groups, or configured redundancy zones.
-    The following shows Geode Regions displayed in table view:
-    <img src="../../images/pulse-region-detail.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_glp_1jr_54" class="image" />
-    -   While in block view or table view, click the name of a Geode region to display additional information in the [Region View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D).
-
-3.  While in icon view, click a host machine icon to display the Geode members on that machine.
-4.  In the Alerts pane, click the severity tabs to filter the message display by the level of severity.
-
-**Cluster View Screen Components**
-
-The following table describes the data pieces displayed on the Cluster View screen.
-
-<table>
-<colgroup>
-<col width="50%" />
-<col width="50%" />
-</colgroup>
-<thead>
-<tr class="header">
-<th>Screen Component</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr class="odd">
-<td><strong>Cluster Status</strong></td>
-<td>Overall status of the distributed system being monitored. Possible statuses include Normal, Warning, or Severe.</td>
-</tr>
-<tr class="even">
-<td>Total Heap</td>
-<td>Total amount of memory (in GB) allocated to the Java heap across all members.</td>
-</tr>
-<tr class="odd">
-<td>Members</td>
-<td>Total number of members in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Servers</td>
-<td>Total number of servers in the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Clients</td>
-<td>Total number of clients in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Locators</td>
-<td>Total number of locators in the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Regions</td>
-<td>Total number of regions in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Functions</td>
-<td>Total number of functions registered in the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Unique CQs</td>
-<td>Total number of unique CQs. Corresponds to the UNIQUE _CQ_QUERY statistic.</td>
-</tr>
-<tr class="even">
-<td>Subscriptions</td>
-<td>Total number of client event subscriptions.</td>
-</tr>
-<tr class="odd">
-<td><strong>Cluster Members</strong></td>
-<td>Graphical, block, or table view of the members in the cluster.</td>
-</tr>
-<tr class="even">
-<td>Topology</td>
-<td>Organizes cluster members by DistributedMember Id.</td>
-</tr>
-<tr class="odd">
-<td>Server Groups</td>
-<td>Organizes cluster members by server group membership. If no server groups are configured, all members appear under the &quot;Default&quot; server group.</td>
-</tr>
-<tr class="even">
-<td>Redundancy Zones</td>
-<td>Organizes cluster members by redundancy zones. If no redundancy zones are configured, all members appear under the &quot;Default&quot; zone.</td>
-</tr>
-<tr class="odd">
-<td>Host Machine</td>
-<td>When you mouse over a machine icon in Topology View, a pop-up appears with the following machine statistics:
-<ul>
-<li><em>CPU Usage</em>. Percentage of CPU being used by Geode processes on the machine.</li>
-<li><em>Memory Usage</em>. Amount of memory (in MB) being used by Geode processes.</li>
-<li><em>Load Avg</em>. Average number of threads on the host machine that are in the run queue or are waiting for disk I/O over the last minutes. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</li>
-<li><em>Sockets</em>. Number of sockets currently open on the machine.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td>Member</td>
-<td>When you mouse over a member icon in Graphical View, a pop-up appears with the following member statistics:
-<ul>
-<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
-<li><em>Threads</em>. Number of threads running on the member.</li>
-<li><em>JVM Pauses</em>. Number of times the JVM used by the member process has paused due to garbage collection or excessive CPU usage.</li>
-<li><em>Regions</em>. Number of regions hosted on the member process.</li>
-<li><em>Clients</em>. Number of client currently connected to the member process.</li>
-<li><em>Gateway Sender</em>. Number of gateway senders configured on the member.</li>
-<li><em>Port</em>. Server port of the cache server member where clients can connect and perform cache operations.</li>
-<li><em>GemFire Version</em>. The version of the Geode member.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td>Member</td>
-<td>In List View, the following data fields are displayed for each member:
-<ul>
-<li><em>ID</em>. DistributedMember Id of the member.</li>
-<li><em>Name</em>. Name of the member.</li>
-<li><em>Host</em>. Hostname or IP address where the member is running.</li>
-<li><em>Heap Usage</em>. Amount of JVM heap memory being used by the member process.</li>
-<li><em>CPU Usage</em>. Percentage of CPU being used by the Geode member process.</li>
-<li><em>Uptime</em>. How long the member has been up and running.</li>
-<li><em>Clients</em>. Number of clients currently connected to the member. It will have a value only if the member acts as a CacheServer.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td><strong>Key Statistics</strong></td>
-<td>Displays a few key performance measurements of the distributed system (over the last 15 minutes).</td>
-</tr>
-<tr class="odd">
-<td>Write/Sec</td>
-<td>Number of write operations per second that have occurred across the cluster. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
-</tr>
-<tr class="even">
-<td>Read/Sec</td>
-<td>Number of read operations per second that have occurred across the cluster.</td>
-</tr>
-<tr class="odd">
-<td>Queries/Sec</td>
-<td>Number of queries per second that have been executed across the cluster.</td>
-</tr>
-<tr class="even">
-<td><strong>No. of JVM Pauses</strong></td>
-<td>Number of times the JVM has paused during the last five minutes to perform garbage collection.</td>
-</tr>
-<tr class="odd">
-<td><strong>WAN Information</strong></td>
-<td>If you have configured gateway senders or receivers for a multi-site (WAN) deployment, this box displays whether the remote cluster is reachable (working connectivity represented by a green triangle).</td>
-</tr>
-<tr class="even">
-<td><strong>Disk Throughput</strong></td>
-<td>Total disk throughput for all disks in cluster.</td>
-</tr>
-<tr class="odd">
-<td><strong>Alerts View</strong></td>
-<td>Displays alerts for the cluster.</td>
-</tr>
-</tbody>
-</table>
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF" class="no-quick-link"></a>Member View
-
-When you select an individual Geode member in Cluster View, Pulse displays the regions available on that member, as well as member-specific information such as the configured listen ports.
-
-<img src="../../images/pulse_member_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_EDBD3D333B2741DCAA5CB94719B507B7" class="image imageleft" width="624" />
-
-Use these basic controls while in Member View:
-
-1.  Click the display icons to display regions using block view or table view.
-2.  Use the drop down menu to select a specific member or search for specific members by name.
-3.  Click **Cluster View** to return to Cluster View. See [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
-4.  Click **Data Browser** to query region data. See [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser).
-
-**Member View Screen Components**
-
-The following table describes the data elements displayed on the Member View screen.
-
-<table>
-<colgroup>
-<col width="50%" />
-<col width="50%" />
-</colgroup>
-<thead>
-<tr class="header">
-<th>Screen Component</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr class="odd">
-<td><strong>Member Status</strong></td>
-<td>Overall status of the member being monitored. Possible statuses include Normal, Warning, or Severe.</td>
-</tr>
-<tr class="even">
-<td>Regions</td>
-<td>Total number of regions hosted on the member.</td>
-</tr>
-<tr class="odd">
-<td>Threads</td>
-<td>Total number of threads being executed on the member.</td>
-</tr>
-<tr class="even">
-<td>Sockets</td>
-<td>Total number of sockets currently open on the member.</td>
-</tr>
-<tr class="odd">
-<td>Load Avg.</td>
-<td>Average number of threads on the member that are in the run queue or are waiting for disk I/O over the last minute. Corresponds to the Linux System statistic loadAverage1. If the load average is not available, a negative value is shown.</td>
-</tr>
-<tr class="even">
-<td>Clients</td>
-<td>Current number of client connections to the member.</td>
-</tr>
-<tr class="odd">
-<td><strong>Member Regions</strong></td>
-<td>Block or table view of the regions hosted on the member.</td>
-</tr>
-<tr class="even">
-<td>Regions</td>
-<td>When you mouse over a region in block view, a pop-up appears with the following data fields:
-<ul>
-<li><em>Name</em>. Region name.</li>
-<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
-<li><em>EntryCount</em>. Number of entries in the region.</li>
-<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions, this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td>Regions</td>
-<td>In table view, the following fields are listed for each region:
-<ul>
-<li><em>Name</em>. Region name.</li>
-<li><em>Type</em>. For example, REPLICATE, PARTITION.</li>
-<li><em>EntryCount</em>. Number of entries in the region.</li>
-<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries. For replicated regions, this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
-<li><em>Scope</em>. Scope configured for the region.</li>
-<li><em>Disk Store Name</em>. Name of disk stores (if any) associated with the region.</li>
-<li><em>Disk Synchronous</em>. True if writes to disk are set to synchronous and false if not. This field reflects the configured disk-synchronous region attribute.</li>
-<li><em>Gateway Enabled</em>. Whether gateway sender and receiver configurations have been defined on members hosting this region.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td><strong>Member Clients</strong></td>
-<td>In table view, the following fields are listed for each client:
-<ul>
-<li><em>Id</em>. DistributedMember ID of the client process.</li>
-<li><em>Name</em>. Name of the client process.</li>
-<li><em>Host</em>. Hostname or IP address of the client process.</li>
-<li><em>Connected</em>. Whether the client process is currently connected to the member.</li>
-<li><em>Queue Size</em>. The size of the queue used by server to send events in case of a subscription enabled client or a client that has continuous queries running on the server.</li>
-<li><em>CPU Usage</em>. Percentage of CPU being used by the client process.</li>
-<li><em>Uptime</em>. Amount of time the client process has been running.</li>
-<li><em>Threads</em>. Threads being used by the member clients</li>
-<li><em>Gets</em>. Total number of successful get requests completed.</li>
-<li><em>Puts</em>. Total number of successful put requests completed.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td><strong>Key Statistics</strong></td>
-<td>Displays a few key performance measurements for the member (over the last 15 minutes).</td>
-</tr>
-<tr class="even">
-<td>% CPU Usage</td>
-<td>Percentage of CPU used by the member.</td>
-</tr>
-<tr class="odd">
-<td>Read/Sec</td>
-<td>Number of read operations per second that have occurred on the member.</td>
-</tr>
-<tr class="even">
-<td>Write/Sec</td>
-<td>Number of write operations per second that have occurred on the member. Each put/putAll operation counts as a write; for example, a putAll of 50 entries is counted as one write.</td>
-</tr>
-<tr class="odd">
-<td><strong>Memory Usage</strong></td>
-<td>Total memory used on the member in MB.</td>
-</tr>
-<tr class="even">
-<td><strong>No. of JVM Pauses</strong></td>
-<td>Number of times the JVM has paused during the last five minutes due to garbage collection or excessive CPU usage.</td>
-</tr>
-<tr class="odd">
-<td><strong>WAN Information</strong></td>
-<td>Displays cluster information. This dialog box only appears if you have configured WAN functionality (gateway senders and gateway receivers).</td>
-</tr>
-<tr class="even">
-<td><strong>Disk Throughput</strong></td>
-<td>Rate of disk writes on the member.</td>
-</tr>
-</tbody>
-</table>
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_D151776BAC8B4704A71F37F8B5CE063D" class="no-quick-link"></a>Region View
-
-The Pulse Region View provides a comprehensive overview of all regions in the Geode distributed system:
-
-<img src="../../images/pulse_data_view.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_A533852E38654E79BE5628E938E170EB" class="image imageleft" width="624" />
-
-Use these basic controls while in Region View:
-
-1.  Click the display icons to display all members that host the region using block view or table view.
-
-    (Click the name of a member to change to that member's [Member View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_3629814A3DF64D31A190495782DB0DBF).)
-
-2.  Search for specific members that host the current region.
-3.  Hover over a member name to display information such as the region entry count, entry size, and throughput on that member.
-4.  Click [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8) or [Data Browser](#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser) to go to those screens.
-
-**Region View Screen Components**
-
-The following table describes the data elements displayed on the Region View screen.
-
-<table>
-<colgroup>
-<col width="50%" />
-<col width="50%" />
-</colgroup>
-<thead>
-<tr class="header">
-<th>Screen Component</th>
-<th>Description</th>
-</tr>
-</thead>
-<tbody>
-<tr class="odd">
-<td><strong>Region Members</strong></td>
-<td>Lists information about Geode members that host the region, either in block view or table view.</td>
-</tr>
-<tr class="even">
-<td>Region Member (Detail View)</td>
-<td>When you hover over a region member in block view, a pop-up appears with the following data fields:
-<ul>
-<li><em>Member Name</em>. The name of the Geode member hosting the region.</li>
-<li><em>EntryCount</em>. Number of entries for the region on that member.</li>
-<li><em>EntrySize</em>. The aggregate entry size (in bytes) of all entries on that member. For replicated regions, this field will only provide a value if the eviction algorithm has been set to EvictionAlgorithm#LRU_MEMORY. All partition regions will have this value. However, the value includes redundant entries and will also count the size of all the secondary entries on the node.</li>
-<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
-<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
-</ul></td>
-</tr>
-<tr class="odd">
-<td>Region Member (Table View)</td>
-<td>In table view, the following fields are listed for each region member:
-<ul>
-<li><em>ID</em>. The unique member ID.</li>
-<li><em>Name</em>. Region name.</li>
-<li><em>Host</em>. Member hostname.</li>
-<li><em>Heap Usage</em>. The total amount of heap used on the member in MB.</li>
-<li><em>CPU Usage</em>. CPU usage as a percent of available CPU.</li>
-<li><em>Uptime</em>. The amount of time elapsed since the member started.</li>
-<li><em>Accessor</em>. Indicates whether the member is an accessor member.</li>
-</ul></td>
-</tr>
-<tr class="even">
-<td><strong>Region Detail</strong></td>
-<td>When you have selected a region, the right hand pane displays the following information about the region:
-<ul>
-<li><em>Name</em>. Name of the region.</li>
-<li><em>Region Path</em>. Path for the region.</li>
-<li><em>Type</em>. For example, REPLICATE, PARTITION</li>
-<li><em>Members</em>. Number of members that are hosting the region.</li>
-<li><em>Empty Nodes</em>. Nodes where the region DataPolicy is defined as EMPTY or where LocalMaxMemory is set to 0.</li>
-<li><em>Entry Count</em>. Total number of entries in the region.</li>
-<li><em>Disk Usage</em>. Persistent data usage.</li>
-<li><em>Persistence</em>. Indicates whether the region's data is persisted to disk.</li>
-<li><em>Memory Usage</em>. The amount of memory used and total available memory (also shown as a percentage).</li>
-<li><em>Reads/Writes</em>. Summary of reads and writes served from memory and from disk stores over the last 15 minutes.</li>
-</ul></td>
-</tr>
-</tbody>
-</table>
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__sec_pulsedatabrowser" class="no-quick-link"></a>Data Browser
-
-The Pulse Data Browser enables you to query region data. Note that there are two key attributes available on DistributedSystemMXBean (see [List of Geode JMX MBeans](../../managing/management/list_of_mbeans.html#topic_4BCF867697C3456D96066BAD7F39FC8B)) that you can use to configure limits for the result sets displayed in Data Browser:
-
--   `QueryResultSetLimit` limits the number of rows that Data Browser queries return. 1000 rows are displayed by default.
--   `QueryCollectionsDepth` limits the number of elements of a collection that Data Browser queries return. This attribute applies to query results that contain collections such as Map, List, and so forth. The default value is 100 elements.
-
-See the `org.apache.geode.management.DistributedSystemMXBean` JavaDocs for information on available MBean methods and attributes.
-
-The following shows an example Data Browser view:
-
-<img src="../../images/pulse-data-browser.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_hhp_dz2_44" class="image imageleft" width="624" />
-
-Use these basic controls while in Data Browser view:
-
-1.  Search for the name of a specific region.
-2.  Select one or more regions to display the Geode members that host those regions. The hosting Geode members appear in the Region Members section.
-3.  Select one or more members from the Region Members section to restrict query results to those members.
-4.  Type in the text of a query to execute. See [Querying](../../developing/querying_basics/chapter_overview.html).
-5.  Display a list of previously-executed queries. Double-click on a query from the history list to copy it to the Query Editor, or delete the query from your history.
-6.  Execute your query or clear the contents of the Query Editor.
-7.  View the current query results.
-8.  Export the query results to a text file.
-9.  Return to [Cluster View](quickstart.html#topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_9794B5754E474E10ABFBCD8B1DA240F8).
-
-## <a id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__section_bfk_sc3_wn" class="no-quick-link"></a>Alerts Widget
-
-The Alerts Widget appears in the right portion of the screen and displays a list of alerts.
-
-The alerts displayed for the cluster appear based on the alertLevel field set in the DistributedSystemMXBean. By default, log messages with the level of SEVERE are shown as alerts. You can modify the level by using the `DistributedSystemMXBean.changeAlertLevel` method. See [System Alert Notifications](../../managing/management/notification_federation_and_alerts.html#topic_212EE5A2ABAB4E8E8EF71807C9ECEF1A__section_7463D13112D54406953416356835E290) for more information.
-
-<img src="../../images/pulse_alerts_widget.png" id="topic_F0ECE9E8179541CCA3D6C5F4FBA84404__image_jrc_smt_qn" class="image" />
-
-Use these basic controls in the Alerts Widget:
-
-1.  Select an alert level to view only alerts with a specific severity.
-2.  Enter text in the search box to filter the list of alerts.
-3.  Select an alert and click Clear to remove it from the alert list.
-4.  Click **Clear All** to remove all alerts from the widget.
-5.  Double-click an alert to open a pop-up window that displays the full text of the alert message.
-6.  Click the check mark in an alert pop-up window to acknowledge the alert. Acknowledged alerts display a check mark in the list of alerts.
-7.  Triple-click the alert in the pop-up or in the alert list to select the message text. You can then copy and paste the text into another application.
-8.  Click the **X** to close the pop-up alert window.
-

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/pulse/system_requirements.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/system_requirements.html.md.erb b/geode-docs/tools_modules/pulse/system_requirements.html.md.erb
deleted file mode 100644
index 05dffe6..0000000
--- a/geode-docs/tools_modules/pulse/system_requirements.html.md.erb
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title:  Pulse System Requirements
----
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Verify that your system meets the installation and runtime requirements for Pulse.
-
-<a id="system_requirements__section_CBD5B04ACC554029B5C710CE8E244FEA"></a>
-The Pulse Web application has been tested for compatibility with the following Web browsers:
-
--   Internet Explorer 9.0.8112.16421
--   Safari 5.1.7 for Windows
--   Google Chrome 22.0.1229.79 m
--   Mozilla Firefox 16.0.1
-
-Pulse has been tested for standalone deployment on Tomcat and Jetty.
-Pulse may work with other operating systems and browsers upon which it has not been tested.
-
-

http://git-wip-us.apache.org/repos/asf/geode/blob/7b34cfd9/geode-docs/tools_modules/redis_adapter.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/redis_adapter.html.md.erb b/geode-docs/tools_modules/redis_adapter.html.md.erb
index 47da3fc..697fc4e 100644
--- a/geode-docs/tools_modules/redis_adapter.html.md.erb
+++ b/geode-docs/tools_modules/redis_adapter.html.md.erb
@@ -18,8 +18,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
-<a id="topic_523F6DE33FE54307BBE8F83BB7D9355D"></a>
-
 
 The Geode Redis adapter allows Geode to function as a drop-in replacement for a Redis data store, letting Redis applications take advantage of Geode’s scaling capabilities without changing their client code. Redis clients connect to a Geode server in the same way they connect to a Redis server, using an IP address and a port number.
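
Not part of this diff, but as a quick hedged illustration of the drop-in behavior described above: start a server with the Redis adapter enabled and point a standard Redis client at it. The server name, bind address, and port below are placeholders.

``` pre
gfsh>start server --name=redisServer1 --redis-bind-address=localhost --redis-port=11211

$ redis-cli -h localhost -p 11211
```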
 


[06/32] geode git commit: GEODE-2955: Added / to the allowed list of characters in region name

Posted by kl...@apache.org.
GEODE-2955: Added / to the allowed list of characters in region name


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/f271667b
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/f271667b
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/f271667b

Branch: refs/heads/feature/GEODE-1279
Commit: f271667b172191429412799327ad53efed6864e4
Parents: ee9ca4e
Author: nabarun <nn...@pivotal.io>
Authored: Wed May 24 15:55:17 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Wed May 24 15:55:17 2017 -0700

----------------------------------------------------------------------
 .../org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/f271667b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
index 8ce7028..3859804 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
@@ -143,7 +143,7 @@ public class LuceneServiceImpl implements InternalLuceneService {
           "Region names may not begin with a double-underscore: " + name);
     }
 
-    final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
+    final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");
     // Ensure the region only contains valid characters
     Matcher matcher = NAME_PATTERN.matcher(name);
     if (!matcher.matches()) {
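
A small hedged sketch (not part of the commit) showing the effect of the widened pattern: with `/` added to the character class, sub-region style names now pass the same validation. The pattern string is copied from the diff above; the sample names are made up.

``` pre
import java.util.regex.Pattern;

public class RegionNameCheck {
  // Pattern copied from the diff above.
  private static final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");

  public static void main(String[] args) {
    System.out.println(NAME_PATTERN.matcher("Customers/Orders2017").matches()); // true
    System.out.println(NAME_PATTERN.matcher("bad region name*").matches());     // false
  }
}
```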


[07/32] geode git commit: GEODE-2913 Update Lucene index documentation

Posted by kl...@apache.org.
GEODE-2913 Update Lucene index documentation


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/096c22d5
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/096c22d5
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/096c22d5

Branch: refs/heads/feature/GEODE-1279
Commit: 096c22d5c73dc609651caf2887b4d95f162230ad
Parents: f271667
Author: Karen Miller <km...@pivotal.io>
Authored: Wed May 17 14:10:42 2017 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Wed May 24 17:07:49 2017 -0700

----------------------------------------------------------------------
 .../source/subnavs/geode-subnav.erb             |  22 +-
 .../implementing_authorization.html.md.erb      |   5 +
 .../statistics/statistics_list.html.md.erb      |  24 ++
 .../topics/cache-elements-list.html.md.erb      |   4 +-
 .../reference/topics/cache_xml.html.md.erb      |  63 +++++
 ...mory_requirements_for_cache_data.html.md.erb |   2 +
 .../gfsh/command-pages/create.html.md.erb       |   2 +-
 .../gfsh/command-pages/destroy.html.md.erb      |  13 +-
 .../lucene_integration.html.md.erb              | 283 +++++++++++++++----
 9 files changed, 339 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-book/master_middleman/source/subnavs/geode-subnav.erb
----------------------------------------------------------------------
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index c97e5ec..12b2151 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -2308,16 +2308,7 @@ gfsh</a>
                                 <a href="/docs/guide/12/tools_modules/lucene_integration.html#using-the-apache-lucene-integration">Using the Apache Lucene Integration</a>
                             </li>
                             <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#java-api-example">Java API Example</a>
-                            </li>
-                            <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#search-example">Search Example</a>
-                            </li>
-                            <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#gfsh-api">Gfsh API</a>
-                            </li>
-                            <li>
-                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#xml-configuration">XML Configuration</a>
+                                <a href="/docs/guide/12/tools_modules/lucene_integration.html#LuceneRandC">Requirements and Caveats</a>
                             </li>
                         </ul>
                     </li>
@@ -2557,6 +2548,14 @@ gfsh</a>
                                                 <a href="/docs/guide/12/reference/topics/cache_xml.html#index">&lt;index&gt;</a>
                                             </li>
                                             <li class="has_submenu">
+                                                <a href="/docs/guide/12/reference/topics/cache_xml.html#luceneindex">&lt;lucene:index&gt;</a>
+                                                <ul>
+                                                    <li>
+                                                        <a href="/docs/guide/12/reference/topics/cache_xml.html#lucenefield">&lt;lucene:field&gt;</a>
+                                                    </li>
+                                                </ul>
+                                            </li>
+                                            <li class="has_submenu">
                                                 <a href="/docs/guide/12/reference/topics/cache_xml.html#entry">&lt;entry&gt;</a>
                                                 <ul>
                                                     <li class="has_submenu">
@@ -3037,6 +3036,9 @@ gfsh</a>
                                 <a href="/docs/guide/12/reference/statistics/statistics_list.html#section_C48B654F973E4B44AD825D459C23A6CD">Locator (LocatorStatistics)</a>
                             </li>
                             <li>
+                                <a href="/docs/guide/12/reference/statistics/statistics_list.html#LuceneStats">Lucene Indexes (LuceneIndexStats)</a>
+                            </li>
+                            <li>
                                 <a href="/docs/guide/12/reference/statistics/statistics_list.html#topic_ohc_tjk_w5">Off-Heap (OffHeapMemoryStats)</a>
                             </li>
                             <li>

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/managing/security/implementing_authorization.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/managing/security/implementing_authorization.html.md.erb b/geode-docs/managing/security/implementing_authorization.html.md.erb
index f897e4c..d16280e 100644
--- a/geode-docs/managing/security/implementing_authorization.html.md.erb
+++ b/geode-docs/managing/security/implementing_authorization.html.md.erb
@@ -117,18 +117,21 @@ This table classifies the permissions assigned for `gfsh` operations.
 | create gateway-receiver                | DATA:MANAGE                      |
 | create gateway-sender                  | DATA:MANAGE                      |
 | create index                           | DATA:MANAGE:RegionName           |
+| create lucene index                    | DATA:MANAGE:RegionName           |
 | create region                          | DATA:MANAGE                      |
 | define index                           | DATA:MANAGE:RegionName           |
 | deploy                                 | DATA:MANAGE, DATA:WRITE, CLUSTER:MANAGE, and CLUSTER:WRITE |
 | describe client                        | CLUSTER:READ                     |
 | describe config                        | CLUSTER:READ                     |
 | describe disk-store                    | CLUSTER:READ                     |
+| describe lucene index                  | CLUSTER:READ                     |
 | describe member                        | CLUSTER:READ                     |
 | describe offline-disk-store            | CLUSTER:READ                     |
 | describe region                        | CLUSTER:READ                     |
 | destroy disk-store                     | DATA:MANAGE                      |
 | destroy function                       | DATA:MANAGE                      |
 | destroy index                          | DATA:MANAGE or DATA:MANAGE:RegionName |
+| destroy lucene index                   | DATA:MANAGE:RegionName           |
 | destroy region                         | DATA:MANAGE                      |
 | disconnect                             | DATA:MANAGE                      |
 | echo                                   | DATA:MANAGE                      |
@@ -152,6 +155,7 @@ This table classifies the permissions assigned for `gfsh` operations.
 | list functions                         | CLUSTER:READ                     |
 | list gateways                          | CLUSTER:READ                     |
 | list indexes                           | CLUSTER:READ                     |
+| list lucene indexes                    | CLUSTER:READ                     |
 | list members                           | CLUSTER:READ                     |
 | list regions                           | DATA:READ                        |
 | load-balance gateway-sender            | DATA:MANAGE                      |
@@ -165,6 +169,7 @@ This table classifies the permissions assigned for `gfsh` operations.
 | remove                                 | DATA:WRITE:RegionName or DATA:WRITE:RegionName:Key |
 | resume gateway-sender                  | DATA:MANAGE                      |
 | revoke missing-disk-store              | DATA:MANAGE                      |
+| search lucene                          | DATA:WRITE                       |
 | show dead-locks                        | CLUSTER:READ                     |
 | show log                               | CLUSTER:READ                     |
 | show metrics                           | CLUSTER:READ                     |

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/statistics/statistics_list.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/statistics/statistics_list.html.md.erb b/geode-docs/reference/statistics/statistics_list.html.md.erb
index c38b2f7..49e416e 100644
--- a/geode-docs/reference/statistics/statistics_list.html.md.erb
+++ b/geode-docs/reference/statistics/statistics_list.html.md.erb
@@ -60,6 +60,8 @@ Performance statistics are collected for each Java application or cache server t
 
 -   **[Locator (LocatorStatistics)](#section_C48B654F973E4B44AD825D459C23A6CD)**
 
+-   **[Lucene Indexes (LuceneIndexStats)](#LuceneStats)**
+
 -   **[Off-Heap (OffHeapMemoryStats)](#topic_ohc_tjk_w5)**
 
 -   **[Operating System Statistics - Linux](#section_923B28F01BC3416786D3AFBD87F22A5E)**
@@ -1006,6 +1008,28 @@ These statistics are on the Geode locator. The primary statistics are:
 | `RESPONSES_FROM_LOCATOR` | Number of responses this locator has sent to clients.                         |
 | `SERVER_LOAD_UPDATES`    | Total number of times a server load update has been received.                 |
 
+## <a id="LuceneStats" class="no-quick-link"></a>Lucene Indexes (LuceneIndexStats)
+
+These statistics quantify the use of Lucene indexes. The primary statistics are:
+
+| Statistic             | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                         |
+|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `queryExecutions`         | The number of Lucene queries executed on this member.    |
+| `queryExecutionTime`         | The amount of time in nanoseconds spent executing Lucene queries.    |
+| `queryExecutionsInProgress`  | The number of query executions currently in progress.    |
+| `queryExecutionTotalHits`  | The total number of documents returned by query executions.    |
+| `repositoryQueryExecutions`  | The number of Lucene repository queries executed on this member.    |
+| `repositoryQueryExecutionTime`  | The amount of time in nanoseconds spent executing Lucene repository queries.    |
+| `repositoryQueryExecutionsInProgress`  | The number of repository query executions currently in progress.    |
+| `repositoryQueryExecutionTotalHits`  | The total number of documents returned by repository query executions.    |
+| `updates`  | The number of Lucene index documents added or removed on this member.    |
+| `updateTime`  | The amount of time in nanoseconds spent adding or removing documents from the index.    |
+| `updatesInProgress`  | The number of index updates in progress.    |
+| `commits`  | The number of Lucene index commits on this member.    |
+| `commitTime`  | The amount of time in nanoseconds spent in Lucene index commits.    |
+| `commitsInProgress`  | The number of Lucene index commits in progress.    |
+| `documents`  | The number of documents in the index.    |
+
 ## <a id="topic_ohc_tjk_w5" class="no-quick-link"></a>Off-Heap (OffHeapMemoryStats)
 
 These statistics quantify the use of off-heap memory. The primary statistics are:

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/topics/cache-elements-list.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/topics/cache-elements-list.html.md.erb b/geode-docs/reference/topics/cache-elements-list.html.md.erb
index 71e092b..2b1c035 100644
--- a/geode-docs/reference/topics/cache-elements-list.html.md.erb
+++ b/geode-docs/reference/topics/cache-elements-list.html.md.erb
@@ -151,7 +151,9 @@ For details, see [&lt;cache&gt; Element Reference](cache_xml.html#cache_xml_cach
             <config-property-value>
    <region>
       <region-attributes>
-      <index>>
+      <index>
+      <lucene:index>
+         <field>
       <entry>
          <key>
             <string>

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/topics/cache_xml.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/topics/cache_xml.html.md.erb b/geode-docs/reference/topics/cache_xml.html.md.erb
index a934b62..cf5d2b3 100644
--- a/geode-docs/reference/topics/cache_xml.html.md.erb
+++ b/geode-docs/reference/topics/cache_xml.html.md.erb
@@ -2685,6 +2685,7 @@ Defines a region in the cache. See [&lt;region-attributes&gt;](#region-attribute
 
 See [&lt;region-attributes&gt;](#region-attributes) for a complete listing of region attributes.
 
+
 ## <a id="index" class="no-quick-link"></a>&lt;index&gt;
 
 Describes an index to be created on a region. The index node, if any, should all come immediately after the "region-attributes" node. The "name" attribute is a required field which identifies the name of the index. See [Working with Indexes](../../developing/query_index/query_index.html) for more information on indexes.
@@ -2728,6 +2729,68 @@ Describes an index to be created on a region. The index node, if any, should all
 </region>
 ```
 
+<!-- start of Lucene index description -->
+## <a id="luceneindex" class="no-quick-link"></a>&lt;lucene:index&gt;
+
+Describes a Lucene index to be created on a region. The `lucene` namespace
+and the scoping operator (`:`) must be specified, as the Geode `cache`
+namespace also defines an `index` element (for OQL indexes).
+
+**API:** `org.apache.geode.cache.lucene` package
+
+| Attribute   | Description                                                                                                                                                                                                                                                                           | Default |
+|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| name        | Required. Name of the Lucene index.   |         |
+
+**Example:**
+
+``` pre
+<cache
+    xmlns="http://geode.apache.org/schema/cache"
+    xmlns:lucene="http://geode.apache.org/schema/lucene"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://geode.apache.org/schema/cache
+        http://geode.apache.org/schema/cache/cache-1.0.xsd
+        http://geode.apache.org/schema/lucene
+        http://geode.apache.org/schema/lucene/lucene-1.0.xsd"
+    version="1.0">
+
+    <region name="regionA" refid="PARTITION">
+        <lucene:index name="myIndex">
+            <lucene:field name="x" />
+            <lucene:field name="y" />
+        </lucene:index>
+    </region>
+</cache>
+```
+<!-- end of Lucene index description -->
+
+<!-- start of Lucene field description -->
+## <a id="lucenefield" class="no-quick-link"></a>&lt;lucene:field&gt;
+
+Describes a field to be included in a Lucene index. Including the
+`lucene` namespace and the scoping operator (`:`) clarifies,
+but is not required.
+
+**API:** `org.apache.geode.cache.lucene` package
+
+| Attribute   | Description                                                                                                                                                                                                                                                                           | Default |
+|-------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| name        | Required. A string that defines the name of the field. If a single field is defined by the value `"__REGION_VALUE_FIELD"`, then the entire value is used as a single field.   |         |
+| analyzer    | A string that provides the path to the analyzer to use for this field. A value of `"null"` uses the default analyzer.  | `"null"` |
+
+**Example:**
+
+``` pre
+<region name="dataregion" refid="PARTITION_REDUNDANT">
+   <lucene:index name="full_value_index">
+     <lucene:field name="__REGION_VALUE_FIELD"/>
+   </lucene:index>
+</region>
+```
+
+<!-- end of Lucene field description -->
+
 ## <a id="entry" class="no-quick-link"></a>&lt;entry&gt;
 
 An "entry" element describes an entry to be added to a region. Note that if an entry with the given key already exists in the region, it will be replaced.

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
index 1509a40..150814a 100644
--- a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
+++ b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
@@ -175,6 +175,8 @@ For indexes used in querying, the overhead varies greatly depending on the type
 
 -   If the index has a single value per region entry for the indexed expression, the index introduces at most 243 bytes per region entry. An example of this type of index is: `fromClause="/portfolios",               indexedExpression="id"`. The maximum of 243 bytes per region entry is reached if each entry has a unique value for the indexed expression. The overhead is reduced if the entries do not have unique index values.
 -   If each region entry has more than one value for the indexed expression, but no two region entries have the same value for it, then the index introduces at most 236 C + 75 bytes per region entry, where C is the average number of values per region entry for the expression.
+-   Lucene indexes add approximately 737 bytes per entry.
+The other index overhead estimates listed here also apply to Lucene indexes.
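
A rough, hedged back-of-the-envelope example (not part of the commit) using the per-entry figures above; actual overhead depends on the number of indexed fields and value sizes:

``` pre
1,000,000 region entries, one Lucene index:
  ~737 bytes/entry x 1,000,000 entries  ~= 737 MB of Lucene index overhead

For comparison, an OQL index with a single unique value per entry:
  at most 243 bytes/entry x 1,000,000 entries  ~= 243 MB
```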
 
 ## <a id="topic_i1m_stz_j4" class="no-quick-link"></a>Estimating Management and Monitoring Overhead
 

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
index 4fb4c8c..1398352 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
@@ -683,7 +683,7 @@ create lucene index --name=value --region=value --field=value(,value)* [--analyz
 |----------------------------------------------------|----------------------------------------------------------------------------------------|---------|
 | <span class="keyword parmname">\\-\\-name</span>       | *Required.* Name of the index to create.                                               |         |
 | <span class="keyword parmname">\\-\\-region</span>     | *Required.* Name/Path of the region which corresponds to the "from" clause in a query. |         |
-| <span class="keyword parmname">\\-\\-field</span>      | *Required.* Field of the region values that are referenced by the index.               |         |
+| <span class="keyword parmname">\\-\\-field</span>      | *Required.* Field of the region values that are referenced by the index. To treat the entire value as a single field, specify `__REGION_VALUE_FIELD`.     |         |
 | <span class="keyword parmname">&#8209;&#8209;analyzer</span>   | Analyzer to extract terms from text                                  |         |
 | <span class="keyword parmname">\\-\\-group</span>      | The index will be created on all the members in the specified member groups.                     |         |
 

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
index e6de426..afd78ee 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/destroy.html.md.erb
@@ -152,25 +152,22 @@ See also [create lucene index](create.html#create_lucene_index), [describe lucen
 **Syntax:**
 
 ``` pre
-destroy lucene index [--name=value] [--region=value]
+destroy lucene index --region=value [--name=value]
 ```
 
-**Note:**
-You must specify at least one of the parameter options. If you enter `destroy lucene index` without any parameters, the command will ask you to specify at least one option.
-
 **Parameters, destroy lucene index:**
 
 | Name                                           | Description                                                                  |
 |------------------------------------------------|------------------------------------------------------------------------------|
-| <span class="keyword parmname">\\-\\-name</span>   | Name of the index to be removed.                                            |
-| <span class="keyword parmname">\\-\\-region</span> | Name of the region from which an index or all indexes are to be removed. |
+| <span class="keyword parmname">&#8209;&#8209;region</span> | *Required.* Name of the region from which indexes are to be removed. If no `--name` option is specified, all indexes associated with the region are destroyed.|
+| <span class="keyword parmname">&#8209;&#8209;name</span>   | Name of the index to be removed.                                            |
 
 
 **Example Commands:**
 
 ``` pre
-destroy lucene index --member=server2
-destroy lucene index --name=MyKeyIndex
+destroy lucene index --region=region1
+destroy lucene index --region=region1 --name=MyKeyIndex
 ```
 
 ## <a id="topic_BEDACECF4599407794ACBC0E56B30F65" class="no-quick-link"></a>destroy region

http://git-wip-us.apache.org/repos/asf/geode/blob/096c22d5/geode-docs/tools_modules/lucene_integration.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/lucene_integration.html.md.erb b/geode-docs/tools_modules/lucene_integration.html.md.erb
index e97ce06..b83705b 100644
--- a/geode-docs/tools_modules/lucene_integration.html.md.erb
+++ b/geode-docs/tools_modules/lucene_integration.html.md.erb
@@ -35,85 +35,73 @@ The Apache Lucene integration:
 For more details, see Javadocs for the classes and interfaces that implement Apache Lucene indexes and searches, including
 `LuceneService`, `LuceneQueryFactory`, `LuceneQuery`, and `LuceneResultStruct`.
 
-## <a id="using-the-apache-lucene-integration" class="no-quick-link"></a>Using the Apache Lucene Integration
+# <a id="using-the-apache-lucene-integration" class="no-quick-link"></a>Using the Apache Lucene Integration
 
-You can create Apache Lucene indexes through a Java API, through the `gfsh` command-line utility, or by means of
-the `cache.xml` configuration file.
+You can interact with Apache Lucene indexes through a Java API,
+through the `gfsh` command-line utility,
+or by means of the `cache.xml` configuration file.
 
-To use Apache Lucene Integration, you will need two pieces of information:
+To use Apache Lucene to create and use indexes,
+you will need two pieces of information:
 
-1.  The name of the region to be indexed or searched
+1.  The name of the region to be indexed and searched
 2.  The names of the fields you wish to index
 
+## Key Points ###
 
-### Key Points ###
-
+- Apache Lucene indexes are supported only on partitioned regions.
+Replicated region types are *not* supported.
+- Lucene indexes reside on servers.
+There is no way to create a Lucene index on a client.
 - Only top level fields of objects stored in the region can be indexed.
-- Apache Lucene indexes are supported only on Partitioned regions.
 - A single index supports a single region. Indexes do not support multiple regions.
-- Heterogeneous objects in single region are supported.
-- Join queries between regions are not supported.
-- Nested objects are not supported.
-- The index needs to be created before the region is created.
+- Heterogeneous objects in a single region are supported.
+
+## <a id="lucene-index-create" class="no-quick-link"></a>Creating an Index
+
+Create the index before creating the region.
 
-## <a id="java-api-example" class="no-quick-link"></a>Java API Example
+When no analyzer is specified, the
+`org.apache.lucene.analysis.standard.StandardAnalyzer` will be used.
+
+### <a id="api-create-example" class="no-quick-link"></a>Java API Example to Create an Index
 
 ``` pre
 // Get LuceneService
 LuceneService luceneService = LuceneServiceProvider.get(cache);
  
-// Create Index on fields with default analyzer:
-luceneService.createIndex(indexName, regionName, "field1", "field2", "field3");
- 
-Region region = cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
+// Create the index on fields with default analyzer
+//  prior to creating the region
+luceneService.createIndexFactory()
+  .addField("name")
+  .addField("zipcode")
+  .create(indexName, regionName);
  
+Region region = cache.createRegionFactory(RegionShortcut.PARTITION)
+  .create(regionName);
 ```
 
-## <a id="search-example" class="no-quick-link"></a>Search Example
-
-``` pre
-LuceneQuery<String, Person> query = luceneService.createLuceneQueryFactory()
-  .setResultLimit(10)
-  .create(indexName, regionName, "Main Street", "address");
-
-Collection<Person> results = query.findValues();
-```
-
-
-## <a id="gfsh-api" class="no-quick-link"></a>Gfsh API
-
-The gfsh command-line utility supports five Apache Lucene actions:
-
-<dt><a href="gfsh/command-pages/create.html#create_lucene_index"><b>create lucene index</b></a></dt>
-    <dd>Create a Lucene index that can be used to execute queries.</dd>
-<dt><a href="gfsh/command-pages/describe.html#describe_lucene_index"><b>describe lucene index</b></a></dt>
-    <dd>Describe a Lucene index.</dd>
-<dt><a href="gfsh/command-pages/destroy.html#destroy_lucene_index"><b>destroy lucene index</b></a></dt>
-    <dd>Destroy a Lucene index.</dd>
-<dt><a href="gfsh/command-pages/list.html#list_lucene_indexes"><b>list lucene indexes</b></a></dt>
-    <dd>List Lucene indexes created for all members.</dd>
-<dt><a href="gfsh/command-pages/search.html#search_lucene"><b>search lucene</b></a></dt>
-    <dd>Search a Lucene index.</dd>
+### <a id="gfsh-create-example" class="no-quick-link"></a>Gfsh Example to Create an Index
 
-**Gfsh command-line examples:**
+For details, see the [gfsh create lucene index](gfsh/command-pages/create.html#create_lucene_index) command reference page.
 
 ``` pre
-// List Index
-gfsh> list lucene indexes --with-stats
-
-// Create Index
 gfsh>create lucene index --name=indexName --region=/orders --field=customer,tags
-
-// Create Index, specifying a custom analyzer for the second field
-// Note: "null" in the first analyzer position means "use the default analyzer for the first field"
-gfsh>create lucene index --name=indexName --region=/orders --field=customer,tags --analyzer=null,org.apache.lucene.analysis.bg.BulgarianAnalyzer
-
-// Execute Lucene query
-gfsh> lucene search --regionName=/orders -queryStrings="John*" --defaultField=field1 --limit=100
 ```
 
+``` pre
+// Create an index, specifying a custom analyzer for the second field
+// Note: "null" in the first analyzer position uses the default analyzer
+// for the first field
+gfsh>create lucene index --name=indexName --region=/orders
+  --field=customer,tags --analyzer=null,org.apache.lucene.analysis.bg.BulgarianAnalyzer
+```
+To use the entire value as a single field, set the required `--field`
+option to `__REGION_VALUE_FIELD`.
+This is only supported when the region entry value is a `String`, `Long`,
+`Integer`, `Float`, or `Double`.
 
-## <a id="xml-configuration" class="no-quick-link"></a>XML Configuration
+### <a id="xml-configuration" class="no-quick-link"></a>XML Configuration to Create an Index
 
 ``` pre
 <cache
@@ -127,12 +115,189 @@ gfsh> lucene search --regionName=/orders -queryStrings="John*" --defaultField=fi
     version="1.0">
  
     <region name="region" refid="PARTITION">
-        <lucene:index name="index">
-          <lucene:field name="a" analyzer="org.apache.lucene.analysis.core.KeywordAnalyzer"/>
-          <lucene:field name="b" analyzer="org.apache.lucene.analysis.core.SimpleAnalyzer"/>
-          <lucene:field name="c" analyzer="org.apache.lucene.analysis.standard.ClassicAnalyzer"/>
+        <lucene:index name="myIndex">
+          <lucene:field name="a" 
+                        analyzer="org.apache.lucene.analysis.core.KeywordAnalyzer"/>
+          <lucene:field name="b" 
+                        analyzer="org.apache.lucene.analysis.core.SimpleAnalyzer"/>
+          <lucene:field name="c" 
+                        analyzer="org.apache.lucene.analysis.standard.ClassicAnalyzer"/>
+          <lucene:field name="d" />
         </lucene:index>
     </region>
 </cache>
 ```
+## <a id="lucene-index-query" class="no-quick-link"></a>Queries
+
+### <a id="gfsh-query-example" class="no-quick-link"></a>Gfsh Example to Query using a Lucene Index
+
+For details, see the [gfsh search lucene](gfsh/command-pages/search.html#search_lucene) command reference page.
+
+``` pre
+gfsh> lucene search --regionName=/orders -queryStrings="John*" --defaultField=field1
+   --limit=100
+```
+
+### <a id="api-query-example" class="no-quick-link"></a>Java API Example to Query using a Lucene Index
+
+``` pre
+LuceneQuery<String, Person> query = luceneService.createLuceneQueryFactory()
+  .setLimit(10)
+  .create(indexName, regionName, "name:John AND zipcode:97006", defaultField);
+
+Collection<Person> results = query.findValues();
+```
+
+## <a id="lucene-index-destroy" class="no-quick-link"></a>Destroying an Index
+
+Since a region destroy operation does not cause the destruction
+of any Lucene indexes,
+destroy any Lucene indexes prior to destroying the associated region.
+
+### <a id="API-destroy-example" class="no-quick-link"></a>Java API Example to Destroy a Lucene Index
+
+``` pre
+luceneService.destroyIndex(indexName, regionName);
+```
+An attempt to destroy a region with a Lucene index will result in
+an `IllegalStateException`,
+issuing an error message similar to:
+
+``` pre
+java.lang.IllegalStateException: The parent region [/orders] in colocation chain
+ cannot be destroyed, unless all its children [[/indexName#_orders.files]] are
+ destroyed
+at org.apache.geode.internal.cache.PartitionedRegion
+    .checkForColocatedChildren(PartitionedRegion.java:7231)
+at org.apache.geode.internal.cache.PartitionedRegion
+    .destroyRegion(PartitionedRegion.java:7243)
+at org.apache.geode.internal.cache.AbstractRegion
+    .destroyRegion(AbstractRegion.java:308)
+at DestroyLuceneIndexesAndRegionFunction
+    .destroyRegion(DestroyLuceneIndexesAndRegionFunction.java:46)
+```
+### <a id="gfsh-destroy-example" class="no-quick-link"></a>Gfsh Example to Destroy a Lucene Index
+
+For details, see the [gfsh destroy lucene index](gfsh/command-pages/destroy.html#destroy_lucene_index) command reference page.
+
+The error message that results from an attempt to destroy a region
+prior to destroying its associated Lucene index
+will be similar to:
+
+``` pre
+Error occurred while destroying region "orders".
+ Reason: The parent region [/orders] in colocation chain cannot be destroyed,
+ unless all its children [[/indexName#_orders.files]] are destroyed
+```
+
+## <a id="lucene-index-change" class="no-quick-link"></a>Changing an Index
+
+Changing an index requires rebuilding it.
+Implement these steps to change an index:
 
+1. Export all region data
+2. Destroy the Lucene index
+3. Destroy the region
+4. Create a new index
+5. Create a new region without the user-defined business logic callbacks
+6. Import the region data with the option to turn on callbacks. 
+The callbacks will invoke a Lucene async event listener to index
+the data.
+7. Alter the region to add the user-defined business logic callbacks
+
+## <a id="addl-gfsh-api" class="no-quick-link"></a>Additional Gfsh Commands
+
+See the [gfsh describe lucene index](gfsh/command-pages/describe.html#describe_lucene_index) command reference page for the command that prints details about
+a specific index.
+
+See the [gfsh list lucene index](gfsh/command-pages/list.html#list_lucene_index) command reference page
+for the command that prints details about the 
+Lucene indexes created for all members.
+
+# <a id="LuceneRandC" class="no-quick-link"></a>Requirements and Caveats
+
+- Join queries between regions are not supported.
+- Nested objects are not supported.
+- Lucene indexes will not be stored within off-heap memory.
+- Lucene queries from within transactions are not supported.
+On an attempt to query from within a transaction,
+a `LuceneQueryException` is thrown, issuing an error message
+on the client (accessor) similar to:
+
+``` pre
+Exception in thread "main" org.apache.geode.cache.lucene.LuceneQueryException:
+ Lucene Query cannot be executed within a transaction
+at org.apache.geode.cache.lucene.internal.LuceneQueryImpl
+    .findTopEntries(LuceneQueryImpl.java:124)
+at org.apache.geode.cache.lucene.internal.LuceneQueryImpl
+    .findPages(LuceneQueryImpl.java:98)
+at org.apache.geode.cache.lucene.internal.LuceneQueryImpl
+    .findPages(LuceneQueryImpl.java:94)
+at TestClient.executeQuerySingleMethod(TestClient.java:196)
+at TestClient.main(TestClient.java:59)
+```
+- Lucene indexes must be created prior to creating the region.
+If an attempt is made to create a Lucene index after creating the region,
+the error message will be similar to:
+
+``` pre
+       Member                | Status
+---------------------------- | ------------------------------------------------------
+192.0.2.0(s2:97639)<v2>:1026 | Failed: The lucene index must be created before region
+192.0.2.0(s3:97652)<v3>:1027 | Failed: The lucene index must be created before region
+192.0.2.0(s1:97626)<v1>:1025 | Failed: The lucene index must be created before region
+```
+- An invalidate operation on a region entry does not invalidate a corresponding
+Lucene index entry.
+A query on a Lucene index that contains values that
+have been invalidated can return results that no longer exist.
+Therefore, do not combine entry invalidation with queries on Lucene indexes.
+- Lucene indexes are not supported for regions that have eviction configured
+with a local destroy.
+Eviction can be configured with overflow to disk,
+but only the region data is overflowed to disk,
+not the Lucene index.
+On an attempt to create a region with eviction configured to do local destroy
+(with a Lucene index),
+an `UnsupportedOperationException` will be thrown,
+issuing an error message similar to:
+
+``` pre
+[error 2017/05/02 16:12:32.461 PDT <main> tid=0x1] 
+ java.lang.UnsupportedOperationException:
+ Lucene indexes on regions with eviction and action local destroy are not supported
+Exception in thread "main" java.lang.UnsupportedOperationException:
+ Lucene indexes on regions with eviction and action local destroy are not supported
+at org.apache.geode.cache.lucene.internal.LuceneRegionListener
+    .beforeCreate(LuceneRegionListener.java:85)
+at org.apache.geode.internal.cache.GemFireCacheImpl
+    .invokeRegionBefore(GemFireCacheImpl.java:3154)
+at org.apache.geode.internal.cache.GemFireCacheImpl
+    .createVMRegion(GemFireCacheImpl.java:3013)
+at org.apache.geode.internal.cache.GemFireCacheImpl
+    .basicCreateRegion(GemFireCacheImpl.java:2991)
+```
+- Be aware that using the same field name in different objects
+where the field has different data types 
+may have unexpected consequences.
+For example, if an index on the field SSN has the following entries
+    - `Object_1 object_1` has String SSN = "1111"
+    - `Object_2 object_2` has Integer SSN = 1111
+    - `Object_3 object_3` has Float SSN = 1111.0
+
+    Integers and floats will not be converted into strings.
+    They remain as `IntPoint` and `FloatPoint` within Lucene.
+    The standard analyzer will not try to tokenize these values.
+    The standard analyzer will only try to break up string values.
+    So, a string search for "SSN: 1111" will return `object_1`.
+    An `IntRangeQuery` for `upper limit : 1112` and `lower limit : 1110`
+will return `object_2`.
+    And, a `FloatRangeQuery` with `upper limit : 1111.5` and `lower limit : 1111.0`
+will return `object_3`.
+- Backups should only be made for regions with Lucene indexes
+when there are no puts, updates, or deletes in progress.
+Incremental backups will not be consistent for the region and
+its index upon restart if these operations were in progress,
+due to the delayed processing associated with the asynchronous event queue.
+If region data needs to be restored from a backup,
+follow the same procedure as given for changing an index.
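
As a hedged sketch of the "Changing an Index" steps added above (not part of the commit): the member, region, index, field, and listener names are placeholders, and the exact callback option on `import data` (shown here as `--invoke-callbacks`) should be confirmed against your gfsh version.

``` pre
gfsh>export data --region=/orders --file=orders.gfd --member=server1
gfsh>destroy lucene index --region=/orders
gfsh>destroy region --name=/orders
gfsh>create lucene index --name=ordersIndex --region=/orders --field=customer,tags
gfsh>create region --name=orders --type=PARTITION
gfsh>import data --region=/orders --file=orders.gfd --member=server1 --invoke-callbacks=true
gfsh>alter region --name=/orders --cache-listener=com.example.OrderListener
```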


[28/32] geode git commit: GEODE-2420: Warn a user if they try to export too much data - document —file-size-limit gfsh option

Posted by kl...@apache.org.
GEODE-2420: Warn a user if they try to export too much data - document —file-size-limit gfsh option


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/31971403
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/31971403
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/31971403

Branch: refs/heads/feature/GEODE-1279
Commit: 31971403c2b6442bc27f03ce4a5bcaf49cb105f7
Parents: 2fb7381
Author: Dave Barnes <db...@pivotal.io>
Authored: Tue May 30 15:23:09 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Tue May 30 15:23:59 2017 -0700

----------------------------------------------------------------------
 geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/31971403/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
index ccf95cc..f0c6d54 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/export.html.md.erb
@@ -170,7 +170,8 @@ The `--dir` parameter specifies a local directory to which log files will be wri
 ``` pre
 export logs [--dir=value] [--group=value(,value)*] [--member=value(,value)*] 
 [--log-level=value] [--only-log-level=value] [--merge-log=value] 
-[--start-time=value] [--end-time=value] [logs-only(=value)?] [--stats-only(=value)?]
+[--start-time=value] [--end-time=value] [--logs-only(=value)?] 
+[--stats-only(=value)?] [--file-size-limit(=value)?]
 ```
 
 | Name                                                   | Description                                                                                                                | Default Value |
@@ -185,6 +186,7 @@ export logs [--dir=value] [--group=value(,value)*] [--member=value(,value)*]
 | <span class="keyword parmname">\\-\\-end-time</span>       | Log entries that occurred before this time will be exported. Format: yyyy/MM/dd/HH/mm/ss/SSS/z OR yyyy/MM/dd               | no limit      |
 | <span class="keyword parmname">\\-\\-logs-only</span>       | Whether to export only logs (not statistics)               | If parameter not specified: false. If parameter specified without a value: true      |
 | <span class="keyword parmname">\\-\\-stats-only</span>       | Whether to export only statistics (not logs)               | If parameter not specified: false. If parameter specified without a value: true      |
+| <span class="keyword parmname">\\-\\-file-size-limit</span>       | Limits the size of the file that can be exported. Specify 0 (zero) for no limit. The value is in megabytes by default, or a unit of [k,m,g,t] may be specified.              | If parameter not specified: 100m. If parameter specified without a value: 0      |
 
 <span class="tablecap">Table 4. Export Logs Parameters</span>
 

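A couple of hedged usage examples of the new option (the directory path and size values are illustrative only):

``` pre
gfsh>export logs --dir=/tmp/exported-logs --file-size-limit=250m
gfsh>export logs --dir=/tmp/exported-logs --logs-only --file-size-limit=0
```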

[11/32] geode git commit: GEODE-2944: Added __REGION_VALUE_FIELD explanation to lucene create index help

Posted by kl...@apache.org.
GEODE-2944: Added __REGION_VALUE_FIELD explanation to lucene create index help

	This closes #533


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/b7faa083
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/b7faa083
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/b7faa083

Branch: refs/heads/feature/GEODE-1279
Commit: b7faa083aa9da5a9e7c3c579584be8f28faae02d
Parents: c793f74
Author: David Anuta <da...@gmail.com>
Authored: Wed May 24 17:05:33 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 11:23:30 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/b7faa083/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
index d0a2999..db9f7b9 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
@@ -40,7 +40,7 @@ public class LuceneCliStrings {
       "Name/Path of the region on which to create the lucene index.";
   public static final String LUCENE_CREATE_INDEX__FIELD = "field";
   public static final String LUCENE_CREATE_INDEX__FIELD_HELP =
-      "fields on the region values which are stored in the lucene index.";
+      "Fields on the region values which are stored in the lucene index.\nUse __REGION_VALUE_FIELD if the entire region value should be indexed.\n__REGION_VALUE_FIELD is valid only if the region values are strings or numbers.";
   public static final String LUCENE_CREATE_INDEX__ANALYZER = "analyzer";
   public static final String LUCENE_CREATE_INDEX__ANALYZER_HELP =
       "Type of the analyzer for each field.";

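A hedged sketch of what the updated help text describes (the index and region names are placeholders): when the region values are plain strings or numbers, the whole value can be indexed and then queried as a single field.

``` pre
gfsh>create lucene index --name=cityIndex --region=/Cities --field=__REGION_VALUE_FIELD

gfsh>search lucene --name=cityIndex --region=/Cities --queryStrings=Portland
     --defaultField=__REGION_VALUE_FIELD
```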

[29/32] geode git commit: GEODE-2957 gfsh create lucene index "null" becomes "DEFAULT"

Posted by kl...@apache.org.
GEODE-2957 gfsh create lucene index "null" becomes "DEFAULT"

    This closes #543


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/80675180
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/80675180
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/80675180

Branch: refs/heads/feature/GEODE-1279
Commit: 806751805cb47877b609ee35cbfa21b362a3d7b3
Parents: 3197140
Author: Karen Miller <km...@pivotal.io>
Authored: Fri May 26 11:57:49 2017 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Tue May 30 15:44:48 2017 -0700

----------------------------------------------------------------------
 .../gfsh/command-pages/create.html.md.erb       | 21 ++++++++++++--------
 .../lucene_integration.html.md.erb              |  4 ++--
 2 files changed, 15 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/80675180/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
index 1398352..ef68f90 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
@@ -683,26 +683,31 @@ create lucene index --name=value --region=value --field=value(,value)* [--analyz
 |----------------------------------------------------|----------------------------------------------------------------------------------------|---------|
 | <span class="keyword parmname">\\-\\-name</span>       | *Required.* Name of the index to create.                                               |         |
 | <span class="keyword parmname">\\-\\-region</span>     | *Required.* Name/Path of the region which corresponds to the "from" clause in a query. |         |
-| <span class="keyword parmname">\\-\\-field</span>      | *Required.* Field of the region values that are referenced by the index. To treat the entire value as a single field, specify `__REGION_VALUE_FIELD`.     |         |
-| <span class="keyword parmname">&#8209;&#8209;analyzer</span>   | Analyzer to extract terms from text                                  |         |
+| <span class="keyword parmname">\\-\\-field</span>      | *Required.* Field of the region values that are referenced by the index. To treat the entire value as a single field, specify `__REGION_VALUE_FIELD`. |         |
+| <span class="keyword parmname">&#8209;&#8209;analyzer</span>   | Analyzer to extract terms from text. Use `DEFAULT` to specify the default analyzer.                                  |         |
 | <span class="keyword parmname">\\-\\-group</span>      | The index will be created on all the members in the specified member groups.                     |         |
 
 
 **Example Commands:**
 
 ``` pre
-create region --name=Person --type=PARTITION_REDUNDANT_PERSISTENT
-create lucene index --name=customerIndex --region=/Customer --field=symbol,revenue,SSN,name,email,address,__REGION_VALUE_FIELD
-create lucene index --name=analyzerIndex --region=/Person --field=name,email,address,revenue --analyzer=null,org.apache.lucene.analysis.core.KeywordAnalyzer,examples.MyCharacterAnalyzer,null
+gfsh>create region --name=Person --type=PARTITION_REDUNDANT_PERSISTENT
+
+gfsh>create lucene index --name=customerIndex --region=/Customer 
+     --field=symbol,revenue,SSN,name,email,address,__REGION_VALUE_FIELD
+
+gfsh>create lucene index --name=analyzerIndex --region=/Person 
+     --field=name,email,address,revenue 
+     --analyzer=DEFAULT,org.apache.lucene.analysis.core.KeywordAnalyzer,examples.MyCharacterAnalyzer,DEFAULT
 ```
 
 **Sample Output:**
 
 ``` pre
 gfsh>create lucene index --name=testIndex --region=testRegion --field=__REGION_VALUE_FIELD
-                 Member                  | Status
----------------------------------------- | ---------------------------------
-192.168.1.23(server50505:17200)<v1>:1025 | Successfully created lucene index
+               Member                  | Status
+-------------------------------------- | ---------------------------------
+192.168.1.23(server505:17200)<v1>:1025 | Successfully created lucene index
 ```
 
 ## <a id="topic_54B0985FEC5241CA9D26B0CE0A5EA863" class="no-quick-link"></a>create region

http://git-wip-us.apache.org/repos/asf/geode/blob/80675180/geode-docs/tools_modules/lucene_integration.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/lucene_integration.html.md.erb b/geode-docs/tools_modules/lucene_integration.html.md.erb
index 7f5afdc..41d429b 100644
--- a/geode-docs/tools_modules/lucene_integration.html.md.erb
+++ b/geode-docs/tools_modules/lucene_integration.html.md.erb
@@ -89,10 +89,10 @@ gfsh>create lucene index --name=indexName --region=/orders --field=customer,tags
 
 ``` pre
 // Create an index, specifying a custom analyzer for the second field
-// Note: "null" in the first analyzer position uses the default analyzer
+// Note: "DEFAULT" in the first analyzer position uses the default analyzer
 // for the first field
 gfsh>create lucene index --name=indexName --region=/orders
-  --field=customer,tags --analyzer=null,org.apache.lucene.analysis.bg.BulgarianAnalyzer
+  --field=customer,tags --analyzer=DEFAULT,org.apache.lucene.analysis.bg.BulgarianAnalyzer
 ```
 To use the entire value as a single field set the required `--field`
 option to be `__REGION_VALUE_FIELD`.

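For comparison, a hedged sketch of creating a similar index programmatically; the exact factory API depends on the Geode version, and `examples.MyCharacterAnalyzer` is the same hypothetical custom analyzer used in the gfsh example. Fields added without an explicit analyzer use the default analyzer, matching the `DEFAULT` keyword, and the index must be created before the region.

``` pre
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;
import org.apache.lucene.analysis.core.KeywordAnalyzer;

public class AnalyzerIndexSetup {
  public static void createAnalyzerIndex(Cache cache) {
    LuceneService luceneService = LuceneServiceProvider.get(cache);
    luceneService.createIndexFactory()
        .addField("name")                                        // default analyzer (DEFAULT)
        .addField("email", new KeywordAnalyzer())
        .addField("address", new examples.MyCharacterAnalyzer()) // hypothetical custom analyzer
        .addField("revenue")                                     // default analyzer (DEFAULT)
        .create("analyzerIndex", "Person");                      // create before the Person region
  }
}
```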

[30/32] geode git commit: Upping read timeout in HAInterestTestCase

Posted by kl...@apache.org.
Upping read timeout in HAInterestTestCase

These tests seem to fail occasionally. Upping the read timeout from 1 second
to 10 seconds to get rid of spurious timing issues.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/76ff5061
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/76ff5061
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/76ff5061

Branch: refs/heads/feature/GEODE-1279
Commit: 76ff50613ad7703305c2d7dec0b4d1c8f5bb0873
Parents: 8067518
Author: Dan Smith <up...@apache.org>
Authored: Tue May 30 16:37:33 2017 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Tue May 30 16:37:33 2017 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/tier/sockets/HAInterestTestCase.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/76ff5061/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/HAInterestTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/HAInterestTestCase.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/HAInterestTestCase.java
index 858f603..5d3ae2d 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/HAInterestTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/HAInterestTestCase.java
@@ -919,7 +919,7 @@ public class HAInterestTestCase extends JUnit4DistributedTestCase {
     try {
       p = (PoolImpl) PoolManager.createFactory().addServer(host, PORT1).addServer(host, PORT2)
           .addServer(host, PORT3).setSubscriptionEnabled(true).setSubscriptionRedundancy(-1)
-          .setReadTimeout(1000).setPingInterval(1000)
+          .setReadTimeout(10000).setPingInterval(1000)
           // retryInterval should be more so that only registerInterste thread
           // will initiate failover
           // .setRetryInterval(20000)
@@ -948,7 +948,7 @@ public class HAInterestTestCase extends JUnit4DistributedTestCase {
     PoolImpl p;
     try {
       p = (PoolImpl) PoolManager.createFactory().addServer(host, PORT1).addServer(host, PORT2)
-          .setSubscriptionEnabled(true).setSubscriptionRedundancy(-1).setReadTimeout(1000)
+          .setSubscriptionEnabled(true).setSubscriptionRedundancy(-1).setReadTimeout(10000)
           .setSocketBufferSize(32768).setMinConnections(6).setPingInterval(200)
           // .setRetryInterval(200)
           // retryAttempts 3
@@ -975,7 +975,7 @@ public class HAInterestTestCase extends JUnit4DistributedTestCase {
     props.setProperty(LOCATORS, "");
     new HAInterestTestCase().createCache(props);
     PoolImpl p = (PoolImpl) PoolManager.createFactory().addServer(hostName, PORT1)
-        .setSubscriptionEnabled(true).setSubscriptionRedundancy(-1).setReadTimeout(1000)
+        .setSubscriptionEnabled(true).setSubscriptionRedundancy(-1).setReadTimeout(10000)
         // .setRetryInterval(20)
         .create("HAInterestBaseTestPool");
     AttributesFactory factory = new AttributesFactory();

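For reference, the same read-timeout knob used in the test is part of the public client pool API; a minimal sketch with placeholder host, port, and pool name:

``` pre
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;

public class SubscriptionPoolSetup {
  public static Pool create(String host, int port) {
    return PoolManager.createFactory()
        .addServer(host, port)
        .setSubscriptionEnabled(true)
        .setSubscriptionRedundancy(-1)
        .setReadTimeout(10000)   // 10 seconds, matching the value the test now uses
        .setPingInterval(1000)
        .create("subscription-pool");
  }
}
```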

[09/32] geode git commit: Undoing spark connector changes related to geode 1.2

Posted by kl...@apache.org.
Undoing spark connector changes related to geode 1.2

The spark connector builds against geode 1.0.0-incubating, and it was no
longer compiling with these changes.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/0dae918d
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/0dae918d
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/0dae918d

Branch: refs/heads/feature/GEODE-1279
Commit: 0dae918df3b4c7bc53abdbf57c92dddba8e814f2
Parents: e79d27d
Author: Lynn Hughes-Godfrey <lh...@pivotal.io>
Authored: Wed May 24 15:31:09 2017 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Thu May 25 10:36:36 2017 -0700

----------------------------------------------------------------------
 .../geodefunctions/RetrieveRegionFunction.java  | 20 ++++++++++----------
 .../internal/DefaultGeodeConnection.scala       |  4 ++--
 2 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/0dae918d/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java b/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
index 7407cc8..096e4d5 100644
--- a/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
+++ b/geode-spark-connector/geode-functions/src/main/java/org/apache/geode/spark/connector/internal/geodefunctions/RetrieveRegionFunction.java
@@ -16,24 +16,25 @@
  */
 package org.apache.geode.spark.connector.internal.geodefunctions;
 
+import java.util.Iterator;
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.execute.Function;
-import org.apache.geode.cache.execute.FunctionContext;
 import org.apache.geode.cache.execute.FunctionException;
-import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.cache.query.Query;
 import org.apache.geode.cache.query.QueryService;
 import org.apache.geode.cache.query.SelectResults;
 import org.apache.geode.cache.query.Struct;
+import org.apache.geode.internal.cache.*;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.Function;
+import org.apache.geode.cache.execute.FunctionContext;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.internal.cache.execute.InternalRegionFunctionContext;
 import org.apache.geode.internal.cache.execute.InternalResultSender;
 import org.apache.geode.internal.cache.partitioned.PREntriesIterator;
 import org.apache.geode.internal.logging.LogService;
-import org.apache.logging.log4j.Logger;
-
-import java.util.Iterator;
 
 /**
  * GemFire function that is used by `SparkContext.geodeRegion(regionPath, whereClause)`
@@ -84,11 +85,10 @@ public class RetrieveRegionFunction implements Function {
     InternalRegionFunctionContext irfc = (InternalRegionFunctionContext) context;
     LocalRegion localRegion = (LocalRegion) irfc.getDataSet();
     boolean partitioned = localRegion.getDataPolicy().withPartitioning();
-    if (StringUtils.isBlank(where)) {
+    if (where.trim().isEmpty())
       retrieveFullRegion(irfc, partitioned, taskDesc);
-    } else {
+    else
       retrieveRegionWithWhereClause(irfc, localRegion, partitioned, where, taskDesc);
-    }
   }
 
   /** ------------------------------------------ */

http://git-wip-us.apache.org/repos/asf/geode/blob/0dae918d/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala b/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
index b5dcf1d..670a3f8 100644
--- a/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
+++ b/geode-spark-connector/geode-spark-connector/src/main/scala/org/apache/geode/spark/connector/internal/DefaultGeodeConnection.scala
@@ -129,7 +129,7 @@ private[connector] class DefaultGeodeConnection (
     val collector = new StructStreamingResultCollector(desc)
         // RetrieveRegionResultCollector[(K, V)]
     import scala.collection.JavaConversions.setAsJavaSet
-    val exec = FunctionService.onRegion(region).setArguments(args).withCollector(collector).asInstanceOf[InternalExecution]
+    val exec = FunctionService.onRegion(region).withArgs(args).withCollector(collector).asInstanceOf[InternalExecution]
       .withBucketFilter(split.bucketSet.map(Integer.valueOf))
     exec.setWaitOnExceptionFlag(true)
     exec.execute(RetrieveRegionFunction.ID)
@@ -144,7 +144,7 @@ private[connector] class DefaultGeodeConnection (
     val args: Array[String] = Array[String](queryString, bucketSet.toString)
     val exec = FunctionService.onRegion(region).withCollector(collector).asInstanceOf[InternalExecution]
       .withBucketFilter(bucketSet.map(Integer.valueOf))
-      .setArguments(args)
+      .withArgs(args)
     exec.execute(QueryFunction.ID)
     collector.getResult
   }

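The API difference being reverted shows up in any onRegion function call: geode 1.0.0-incubating attaches arguments with `withArgs`, which later Geode versions renamed to `setArguments`. A hedged sketch against the older API, with a placeholder region and function id:

``` pre
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class OnRegionCallSketch {
  public static Object call(Region<?, ?> region, Object args, String functionId) {
    // geode 1.0.0-incubating API: withArgs(...) instead of setArguments(...)
    Execution execution = FunctionService.onRegion(region).withArgs(args);
    ResultCollector<?, ?> collector = execution.execute(functionId);
    return collector.getResult();
  }
}
```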

[21/32] geode git commit: GEODE-2960 : Trim field parameter values from create lucene index.

Posted by kl...@apache.org.
GEODE-2960 : Trim field parameter values from create lucene index.

	* Added logic to trim leading and trailing spaces from values provided for the 'field' option.
	* Modified existing test case to verify changes.

	This closes #541


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/d50489c6
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/d50489c6
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/d50489c6

Branch: refs/heads/feature/GEODE-1279
Commit: d50489c6c526ab996c4ff8883283a6e8f7a228a1
Parents: d3543d2
Author: Deepak Dixit <de...@gmail.com>
Authored: Fri May 26 16:59:41 2017 +0530
Committer: nabarun <nn...@pivotal.io>
Committed: Tue May 30 11:59:34 2017 -0700

----------------------------------------------------------------------
 .../geode/cache/lucene/internal/cli/LuceneIndexCommands.java  | 7 ++++++-
 .../configuration/LuceneClusterConfigurationDUnitTest.java    | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/d50489c6/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
index 9317e2e..2fa9356 100755
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
@@ -53,6 +53,7 @@ import org.springframework.shell.core.annotation.CliCommand;
 import org.springframework.shell.core.annotation.CliOption;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -184,7 +185,11 @@ public class LuceneIndexCommands extends AbstractCommandsSupport {
     this.securityService.authorizeRegionManage(regionPath);
     try {
       final InternalCache cache = getCache();
-      LuceneIndexInfo indexInfo = new LuceneIndexInfo(indexName, regionPath, fields, analyzers);
+      // Trim fields to remove any leading or trailing spaces.
+      String[] trimmedFields =
+          Arrays.stream(fields).map(field -> field.trim()).toArray(size -> new String[size]);
+      LuceneIndexInfo indexInfo =
+          new LuceneIndexInfo(indexName, regionPath, trimmedFields, analyzers);
       final ResultCollector<?, ?> rc =
           this.executeFunctionOnAllMembers(createIndexFunction, indexInfo);
       final List<CliFunctionResult> funcResults = (List<CliFunctionResult>) rc.getResult();

http://git-wip-us.apache.org/repos/asf/geode/blob/d50489c6/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
index 7acff1b..867dc2e 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/configuration/LuceneClusterConfigurationDUnitTest.java
@@ -259,7 +259,7 @@ public class LuceneClusterConfigurationDUnitTest {
     CommandStringBuilder csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, indexName);
     csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
-    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "'field1, field2, field3'");
     gfshConnector.executeAndVerifyCommand(csb.toString());
   }
 

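With the trimming in place, a field list written with spaces after the commas can be quoted so gfsh passes it as a single option value; a hedged example with placeholder index and region names:

``` pre
gfsh>create lucene index --name=personIndex --region=/Person --field='name, email, address'
```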

[18/32] geode git commit: GEODE-2939: Make sure bucket region initiates its event tracker from the image provider.

Posted by kl...@apache.org.
GEODE-2939: Make sure bucket region initiates its event tracker from the image provider.

Save all event states received from remote processes.
Initiate the event tracker from the image provider only.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/56f976c8
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/56f976c8
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/56f976c8

Branch: refs/heads/feature/GEODE-1279
Commit: 56f976c89fabed58a086a845593efc2ef6e75114
Parents: 29ea88a
Author: eshu <es...@pivotal.io>
Authored: Thu May 25 16:38:55 2017 -0700
Committer: eshu <es...@pivotal.io>
Committed: Thu May 25 17:14:09 2017 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/BucketRegion.java      | 29 ++++++++
 .../cache/CacheDistributionAdvisee.java         |  8 ++
 .../internal/cache/CreateRegionProcessor.java   | 36 ++++-----
 .../geode/internal/cache/DistributedRegion.java |  9 +++
 .../geode/internal/cache/EventTracker.java      |  3 +-
 .../internal/cache/InitialImageOperation.java   |  3 +
 .../internal/cache/EventTrackerDUnitTest.java   | 78 ++++++++++++++++++++
 7 files changed, 147 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 7bfffb7..31b341a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -29,8 +29,11 @@ import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.HeapDataOutputStream;
 import org.apache.geode.internal.Version;
 import org.apache.geode.internal.cache.BucketAdvisor.BucketProfile;
+import org.apache.geode.internal.cache.CreateRegionProcessor.CreateRegionReplyProcessor;
+import org.apache.geode.internal.cache.EventTracker.EventSeqnoHolder;
 import org.apache.geode.internal.cache.FilterRoutingInfo.FilterInfo;
 import org.apache.geode.internal.cache.control.MemoryEvent;
+import org.apache.geode.internal.cache.ha.ThreadIdentifier;
 import org.apache.geode.internal.cache.partitioned.Bucket;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage;
 import org.apache.geode.internal.cache.partitioned.InvalidateMessage;
@@ -92,6 +95,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   private final AtomicLong numOverflowBytesOnDisk = new AtomicLong();
   private final AtomicLong numEntriesInVM = new AtomicLong();
   private final AtomicLong evictions = new AtomicLong();
+  // For GII
+  private CreateRegionReplyProcessor createRegionReplyProcessor;
 
   /**
    * Contains size in bytes of the values stored in theRealMap. Sizes are tallied during put and
@@ -281,6 +286,30 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   }
 
   @Override
+  public void registerCreateRegionReplyProcessor(CreateRegionReplyProcessor processor) {
+    this.createRegionReplyProcessor = processor;
+  }
+
+  @Override
+  protected void recordEventStateFromImageProvider(InternalDistributedMember provider) {
+    if (this.createRegionReplyProcessor != null) {
+      Map<ThreadIdentifier, EventSeqnoHolder> providerEventStates =
+          this.createRegionReplyProcessor.getEventState(provider);
+      if (providerEventStates != null) {
+        recordEventState(provider, providerEventStates);
+      } else {
+        // We do not expect this to happen. It covers the case where we get gii from a node
+        // that was not in the cluster originally when we sent
+        // createRegionMessage (so its event state was not saved),
+        // but that became available before we could get gii from anyone else.
+        // This will not cause a data inconsistency issue. Log this message for debugging purposes.
+        logger.info("Could not initiate event tracker from GII provider {}", provider);
+      }
+      this.createRegionReplyProcessor = null;
+    }
+  }
+
+  @Override
   protected CacheDistributionAdvisor createDistributionAdvisor(
       InternalRegionArguments internalRegionArgs) {
     return internalRegionArgs.getBucketAdvisor();

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
index e4a7957..d933019 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
@@ -17,6 +17,7 @@ package org.apache.geode.internal.cache;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.distributed.internal.DistributionAdvisee;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile;
+import org.apache.geode.internal.cache.CreateRegionProcessor.CreateRegionReplyProcessor;
 
 /**
  * Distributed cache object (typically a <code>Region</code>) which uses a
@@ -54,4 +55,11 @@ public interface CacheDistributionAdvisee extends DistributionAdvisee {
    * @param profile the remote member's profile
    */
   public void remoteRegionInitialized(CacheProfile profile);
+
+  /**
+   * Allow this advisee to know the CreateRegionReplyProcessor that is creating it.
+   * 
+   * @param processor the CreateRegionReplyProcessor that is creating the advisee
+   */
+  default public void registerCreateRegionReplyProcessor(CreateRegionReplyProcessor processor) {}
 }

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
index c1d1e77..1e38065 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.logging.log4j.Logger;
 
@@ -48,6 +49,8 @@ import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.InternalDataSerializer;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.InitialImageAdvice;
+import org.apache.geode.internal.cache.EventTracker.EventSeqnoHolder;
+import org.apache.geode.internal.cache.ha.ThreadIdentifier;
 import org.apache.geode.internal.cache.partitioned.Bucket;
 import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
@@ -96,6 +99,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
       }
 
       CreateRegionReplyProcessor replyProc = new CreateRegionReplyProcessor(recps);
+      newRegion.registerCreateRegionReplyProcessor(replyProc);
 
       boolean useMcast = false; // multicast is disabled for this message for now
       CreateRegionMessage msg = getCreateRegionMessage(recps, replyProc, useMcast);
@@ -199,17 +203,16 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
           .getDistributedSystem(), members);
     }
 
-    /**
-     * guards application of event state to the region so that we deserialize and apply event state
-     * only once
-     */
-    private Object eventStateLock = new Object();
-
-    /** whether event state has been recorded in the region */
-    private boolean eventStateRecorded = false;
+    private final Map<DistributedMember, Map<ThreadIdentifier, EventSeqnoHolder>> remoteEventStates =
+        new ConcurrentHashMap<>();
 
     private boolean allMembersSkippedChecks = true;
 
+    public Map<ThreadIdentifier, EventSeqnoHolder> getEventState(
+        InternalDistributedMember provider) {
+      return this.remoteEventStates.get(provider);
+    }
+
     /**
      * true if all members skipped CreateRegionMessage#checkCompatibility(), in which case
      * CreateRegionMessage should be retried.
@@ -218,6 +221,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
       return this.allMembersSkippedChecks;
     }
 
+    @SuppressWarnings("unchecked")
     @Override
     public void process(DistributionMessage msg) {
       Assert.assertTrue(msg instanceof CreateRegionReplyMessage,
@@ -246,17 +250,13 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
             RegionAdvisor ra = (RegionAdvisor) cda;
             ra.putBucketRegionProfiles(reply.bucketProfiles);
           }
-          if (reply.eventState != null && lr.hasEventTracker()) {
-            synchronized (eventStateLock) {
-              if (!this.eventStateRecorded) {
-                this.eventStateRecorded = true;
-                Object eventState = null;
-                eventState = reply.eventState;
-                lr.recordEventState(reply.getSender(), (Map) eventState);
-              }
-            }
+
+          // Save all event states, need to initiate the event tracker from the GII provider
+          if (reply.eventState != null) {
+            remoteEventStates.put(reply.getSender(),
+                (Map<ThreadIdentifier, EventSeqnoHolder>) reply.eventState);
           }
-          reply.eventState = null;
+
           if (lr.isUsedForPartitionedRegionBucket()) {
             ((BucketRegion) lr).updateEventSeqNum(reply.seqKeyForWan);
           }

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
index 650fe2a..9df64d0 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegion.java
@@ -261,6 +261,15 @@ public class DistributedRegion extends LocalRegion implements CacheDistributionA
   }
 
   /**
+   * Record the event state from image provider
+   * 
+   * @param provider the member that provided the initial image and event state
+   */
+  protected void recordEventStateFromImageProvider(InternalDistributedMember provider) {
+    // No Op. Only Bucket region will initiate event states
+  }
+
+  /**
    * Intended for used during construction of a DistributedRegion
    * 
    * @return the advisor to be used by the region

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
index 2c86aed..b919043 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EventTracker.java
@@ -99,7 +99,8 @@ public class EventTracker {
   String name;
 
   /**
-   * whether or not this tracker has been initialized with state from another process
+   * whether or not this tracker has been initialized to allow entry operations. A replicate
+   * region does not initiate its event tracker from its replicates.
    */
   volatile boolean initialized;
 

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
index 82df980..f8e9d0f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
@@ -231,11 +231,13 @@ public class InitialImageOperation {
       }
     }
     long giiStart = this.region.getCachePerfStats().startGetInitialImage();
+    InternalDistributedMember provider = null;
 
     for (Iterator itr = recipients.iterator(); !this.gotImage && itr.hasNext();) {
       // if we got a partial image from the previous recipient, then clear it
 
       InternalDistributedMember recipient = (InternalDistributedMember) itr.next();
+      provider = recipient;
 
       // In case of HARegion, before getting the region snapshot(image) get the filters
       // registered by the associated client and apply them.
@@ -546,6 +548,7 @@ public class InitialImageOperation {
     } // for
 
     if (this.gotImage) {
+      this.region.recordEventStateFromImageProvider(provider);
       this.region.getCachePerfStats().endGetInitialImage(giiStart);
       if (this.isDeltaGII) {
         this.region.getCachePerfStats().incDeltaGIICompleted();

http://git-wip-us.apache.org/repos/asf/geode/blob/56f976c8/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
index 3faf41f..77c0998 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/EventTrackerDUnitTest.java
@@ -19,8 +19,11 @@ import static org.junit.Assert.*;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
 
+import org.awaitility.Awaitility;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -405,4 +408,79 @@ public class EventTrackerDUnitTest extends JUnit4CacheTestCase {
   protected static int getCacheServerPort() {
     return cacheServerPort;
   }
+
+  /**
+   * Tests that the event tracker is initialized after gii
+   */
+  @Test
+  public void testEventTrackerIsInitalized() throws CacheException {
+    Host host = Host.getHost(0);
+    VM vm0 = host.getVM(0);
+    VM vm1 = host.getVM(1);
+    VM vm2 = host.getVM(2);
+
+    createPRInVMs(vm0, vm1, vm2);
+
+    createPR();
+
+    doPutsInVMs(vm0, vm1, vm2);
+
+    doPuts();
+
+    verifyEventTrackerContent();
+
+    // close the region
+    getCache().getRegion(getName()).close();
+
+    // create the region again.
+    createPR();
+
+    for (int i = 0; i < 12; i++) {
+      waitEntryIsLocal(i);
+    }
+
+    // verify event track initialized after create region
+    verifyEventTrackerContent();
+
+  }
+
+  private void waitEntryIsLocal(int i) {
+    Awaitility.await().pollInterval(10, TimeUnit.MILLISECONDS).pollDelay(10, TimeUnit.MILLISECONDS)
+        .atMost(30, TimeUnit.SECONDS)
+        .until(() -> getCache().getRegion(getName()).getEntry(i) != null);
+  }
+
+  private void verifyEventTrackerContent() {
+    PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(getName());
+    BucketRegion br = pr.getDataStore().getLocalBucketById(0);
+    Map<?, ?> eventStates = br.getEventState();
+    assertTrue(eventStates.size() == 4);
+  }
+
+  public void createPRInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> createPR());
+    }
+  }
+
+  private void createPR() {
+    PartitionAttributesFactory paf =
+        new PartitionAttributesFactory().setRedundantCopies(3).setTotalNumBuckets(4);
+    RegionFactory fact = getCache().createRegionFactory(RegionShortcut.PARTITION)
+        .setPartitionAttributes(paf.create());
+    fact.create(getName());
+  }
+
+  public void doPutsInVMs(VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> doPuts());
+    }
+  }
+
+  private void doPuts() {
+    Region region = getCache().getRegion(getName());
+    for (int i = 0; i < 12; i++) {
+      region.put(i, i);
+    }
+  }
 }


[15/32] geode git commit: GEODE-2993: Rethrow CacheClosedException from AbstractGatewaySender.distribute()

Posted by kl...@apache.org.
GEODE-2993: Rethrow CacheClosedException from AbstractGatewaySender.distribute()

- Rethrow CacheClosedException.
- Add a test for cache close while enqueuing an event in the AEQ.
- Add cleanup of disk dirs created by the test.


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/0fe0a106
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/0fe0a106
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/0fe0a106

Branch: refs/heads/feature/GEODE-1279
Commit: 0fe0a1061065f07d4b734d7055f56ad1635f1a2a
Parents: c1ab3ff
Author: Lynn Hughes-Godfrey <lh...@pivotal.io>
Authored: Thu May 25 15:31:16 2017 -0700
Committer: Lynn Hughes-Godfrey <lh...@pivotal.io>
Committed: Thu May 25 15:31:16 2017 -0700

----------------------------------------------------------------------
 .../cache/wan/AbstractGatewaySender.java        |   1 +
 .../cache/wan/AsyncEventQueueTestBase.java      |   2 +
 .../asyncqueue/AsyncEventListenerDUnitTest.java | 102 +++++++++++++++++++
 3 files changed, 105 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/0fe0a106/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
index 7ed9b51..c38d547 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
@@ -973,6 +973,7 @@ public abstract class AbstractGatewaySender implements GatewaySender, Distributi
           ev.enqueueEvent(operation, clonedEvent, substituteValue);
         } catch (CancelException e) {
           logger.debug("caught cancel exception", e);
+          throw e;
         } catch (RegionDestroyedException e) {
           logger.warn(LocalizedMessage.create(
               LocalizedStrings.GatewayImpl_0_AN_EXCEPTION_OCCURRED_WHILE_QUEUEING_1_TO_PERFORM_OPERATION_2_FOR_3,

http://git-wip-us.apache.org/repos/asf/geode/blob/0fe0a106/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
index 6fe7ee9..dc7a218 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AsyncEventQueueTestBase.java
@@ -98,6 +98,7 @@ import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.Wait;
 import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
 import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
@@ -1555,6 +1556,7 @@ public class AsyncEventQueueTestBase extends JUnit4DistributedTestCase {
 
   public static void cleanupVM() throws IOException {
     closeCache();
+    JUnit4CacheTestCase.cleanDiskDirs();
   }
 
   public static void closeCache() throws IOException {

http://git-wip-us.apache.org/repos/asf/geode/blob/0fe0a106/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
index 3dd0550..795af36 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
@@ -19,6 +19,7 @@ import static org.junit.Assert.*;
 import static org.mockito.Matchers.any;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -29,11 +30,18 @@ import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 import java.util.stream.LongStream;
 
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.CacheClosedException;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.wan.GatewayEventFilter;
+import org.apache.geode.cache.wan.GatewayQueueEvent;
+import org.apache.geode.internal.cache.wan.MyAsyncEventListener;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DataPolicy;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.cache.asyncqueue.AsyncEvent;
@@ -42,11 +50,14 @@ import org.apache.geode.cache.asyncqueue.AsyncEventQueueFactory;
 import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
 import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueImpl;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.persistence.PartitionOfflineException;
 import org.apache.geode.cache.wan.GatewaySender.OrderPolicy;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.AvailablePortHelper;
+import org.apache.geode.internal.cache.ForceReattemptException;
 import org.apache.geode.internal.cache.wan.AsyncEventQueueTestBase;
+import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableRunnableIF;
 import org.apache.geode.test.dunit.VM;
@@ -1674,6 +1685,66 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     Awaitility.waitAtMost(10000, TimeUnit.MILLISECONDS).until(() -> getBucketMoved(vm2, "ln"));
   }
 
+  @Test
+  public void testCacheClosedBeforeAEQWrite() {
+    Integer lnPort =
+        (Integer) vm0.invoke(() -> AsyncEventQueueTestBase.createFirstLocatorWithDSId(1));
+
+    vm1.invoke(createCacheRunnable(lnPort));
+    vm2.invoke(createCacheRunnable(lnPort));
+    vm3.invoke(createCacheRunnable(lnPort));
+    final DistributedMember member1 =
+        vm1.invoke(() -> cache.getDistributedSystem().getDistributedMember());
+
+    vm1.invoke(() -> addAEQWithCacheCloseFilter());
+    vm2.invoke(() -> addAEQWithCacheCloseFilter());
+
+    vm1.invoke(() -> createPersistentPartitionRegion());
+    vm2.invoke(() -> createPersistentPartitionRegion());
+    vm3.invoke(() -> {
+      AttributesFactory fact = new AttributesFactory();
+
+      PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+      pfact.setTotalNumBuckets(16);
+      pfact.setLocalMaxMemory(0);
+      fact.setPartitionAttributes(pfact.create());
+      fact.setOffHeap(isOffHeap());
+      Region r = cache.createRegionFactory(fact.create()).addAsyncEventQueueId("ln")
+          .create(getTestMethodName() + "_PR");
+
+    });
+
+    vm3.invoke(() -> {
+      Region r = cache.getRegion(Region.SEPARATOR + getTestMethodName() + "_PR");
+      r.put(1, 1);
+      r.put(2, 2);
+      // This will trigger the gateway event filter to close the cache
+      try {
+        r.removeAll(Collections.singleton(1));
+        fail("Should have received a partition offline exception");
+      } catch (PartitionOfflineException expected) {
+
+      }
+    });
+  }
+
+  private void createPersistentPartitionRegion() {
+    AttributesFactory fact = new AttributesFactory();
+
+    PartitionAttributesFactory pfact = new PartitionAttributesFactory();
+    pfact.setTotalNumBuckets(16);
+    fact.setPartitionAttributes(pfact.create());
+    fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
+    fact.setOffHeap(isOffHeap());
+    Region r = cache.createRegionFactory(fact.create()).addAsyncEventQueueId("ln")
+        .create(getTestMethodName() + "_PR");
+  }
+
+  private void addAEQWithCacheCloseFilter() {
+    cache.createAsyncEventQueueFactory().addGatewayEventFilter(new CloseCacheGatewayFilter())
+        .setPersistent(true).setParallel(true).create("ln", new MyAsyncEventListener());
+  }
+
   private static Set<Object> getKeysSeen(VM vm, String asyncEventQueueId) {
     return vm.invoke(() -> {
       final BucketMovingAsyncEventListener listener =
@@ -1690,6 +1761,37 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     });
   }
 
+  private final class CloseCacheGatewayFilter implements GatewayEventFilter {
+    @Override
+    public boolean beforeEnqueue(final GatewayQueueEvent event) {
+      if (event.getOperation().isRemoveAll()) {
+        new Thread(() -> cache.close()).start();
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          // ignore
+        }
+        throw new CacheClosedException();
+      }
+      return true;
+    }
+
+    @Override
+    public boolean beforeTransmit(final GatewayQueueEvent event) {
+      return false;
+    }
+
+    @Override
+    public void afterAcknowledgement(final GatewayQueueEvent event) {
+
+    }
+
+    @Override
+    public void close() {
+
+    }
+  }
+
   private static class BucketMovingAsyncEventListener implements AsyncEventListener {
     private final DistributedMember destination;
     private boolean moved;

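For context, a hedged sketch of how an AsyncEventQueue with a GatewayEventFilter is wired to a region through the public API (the queue id, listener, filter, and region name are placeholders); `beforeEnqueue` runs on the path where the rethrown CacheClosedException now propagates:

``` pre
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.asyncqueue.AsyncEventListener;
import org.apache.geode.cache.asyncqueue.AsyncEventQueue;
import org.apache.geode.cache.wan.GatewayEventFilter;

public class AeqWiringSketch {
  public static Region<Object, Object> wire(Cache cache, AsyncEventListener listener,
      GatewayEventFilter filter) {
    AsyncEventQueue queue = cache.createAsyncEventQueueFactory()
        .setParallel(true)
        .setPersistent(true)
        .addGatewayEventFilter(filter)   // beforeEnqueue() is invoked as events are queued
        .create("ln", listener);

    return cache.<Object, Object>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
        .addAsyncEventQueueId(queue.getId())
        .create("exampleRegion");
  }
}
```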

[16/32] geode git commit: GEODE-2950: Updated error messages

Posted by kl...@apache.org.
GEODE-2950: Updated error messages

	* Different error messages are displayed when validating region names versus index names.
	* Added a flexible enum allowing for expanded error messages and name validation.

	This closes #540


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/5ab4a693
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/5ab4a693
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/5ab4a693

Branch: refs/heads/feature/GEODE-1279
Commit: 5ab4a69378c697fdf050048165e2945a4b028eb7
Parents: 0fe0a10
Author: David Anuta <da...@gmail.com>
Authored: Thu May 25 15:18:40 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 16:14:30 2017 -0700

----------------------------------------------------------------------
 .../lucene/internal/LuceneServiceImpl.java      | 58 +++++++++++---------
 .../functions/LuceneCreateIndexFunction.java    |  7 ++-
 .../cli/LuceneIndexCommandsDUnitTest.java       | 10 ++--
 3 files changed, 43 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/5ab4a693/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
index c0d6266..23b6925 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
@@ -128,34 +128,42 @@ public class LuceneServiceImpl implements InternalLuceneService {
     return getUniqueIndexName(indexName, regionPath) + regionSuffix;
   }
 
-  public static void validateCreateIndexCommandParams(String name, boolean isRegionPath) {
-    if (name == null) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.LocalRegion_NAME_CANNOT_BE_NULL.toLocalizedString());
-    }
-    if (name.isEmpty()) {
-      throw new IllegalArgumentException(
-          LocalizedStrings.LocalRegion_NAME_CANNOT_BE_EMPTY.toLocalizedString());
-    }
+  public enum validateCommandParameters {
+    REGION_PATH, INDEX_NAME;
 
-    if (name.startsWith("__")) {
-      throw new IllegalArgumentException(
-          "Parameter names may not begin with a double-underscore: " + name);
-    }
+    public void validateName(String name) {
+      if (name == null) {
+        throw new IllegalArgumentException(
+            LocalizedStrings.LocalRegion_NAME_CANNOT_BE_NULL.toLocalizedString());
+      }
+      if (name.isEmpty()) {
+        throw new IllegalArgumentException(
+            LocalizedStrings.LocalRegion_NAME_CANNOT_BE_EMPTY.toLocalizedString());
+      }
 
-    final Pattern NAME_PATTERN;
-    if (isRegionPath) {
-      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");
-    } else {
-      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
-    }
+      boolean iae = false;
+      String msg =
+          " names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens";
+      Matcher matcher = null;
+      switch (this) {
+        case REGION_PATH:
+          matcher = Pattern.compile("[aA-zZ0-9-_./]+").matcher(name);
+          msg = "Region" + msg + ", underscores, or forward slashes: ";
+          iae = name.startsWith("__") || !matcher.matches();
+          break;
+        case INDEX_NAME:
+          matcher = Pattern.compile("[aA-zZ0-9-_.]+").matcher(name);
+          msg = "Index" + msg + " or underscores: ";
+          iae = name.startsWith("__") || !matcher.matches();
+          break;
+        default:
+          throw new IllegalArgumentException("Illegal option for validateName function");
+      }
 
-    // Ensure the region only contains valid characters
-    Matcher matcher = NAME_PATTERN.matcher(name);
-    if (!matcher.matches()) {
-      throw new IllegalArgumentException(
-          "Parameter names may only be alphanumeric, though they can contain hyphens or underscores: "
-              + name);
+      // Ensure the region only contains valid characters
+      if (iae) {
+        throw new IllegalArgumentException(msg + name);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/5ab4a693/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
index 26ac0e2..d49f7f9 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
@@ -15,7 +15,8 @@
 
 package org.apache.geode.cache.lucene.internal.cli.functions;
 
-import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCreateIndexCommandParams;
+import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCommandParameters.INDEX_NAME;
+import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCommandParameters.REGION_PATH;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.geode.cache.Cache;
@@ -67,7 +68,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
       memberId = cache.getDistributedSystem().getDistributedMember().getId();
       LuceneService service = LuceneServiceProvider.get(cache);
 
-      validateCreateIndexCommandParams(indexInfo.getIndexName(), false);
+      INDEX_NAME.validateName(indexInfo.getIndexName());
 
       String[] fields = indexInfo.getSearchableFieldNames();
       String[] analyzerName = indexInfo.getFieldAnalyzers();
@@ -86,7 +87,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
         }
       }
 
-      validateCreateIndexCommandParams(indexInfo.getRegionPath(), true);
+      REGION_PATH.validateName(indexInfo.getRegionPath());
       indexFactory.create(indexInfo.getIndexName(), indexInfo.getRegionPath());
 
       // TODO - update cluster configuration by returning a valid XmlEntity

http://git-wip-us.apache.org/repos/asf/geode/blob/5ab4a693/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
index 5e9c4f9..5cbe31c 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
@@ -210,7 +210,8 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     String resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
+    assertTrue(resultAsString.contains(
+        "Region names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens, underscores, or forward slashes:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
@@ -219,7 +220,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
 
     resultAsString = executeCommandAndLogResult(csb);
     assertTrue(resultAsString.contains(
-        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
+        "Region names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens, underscores, or forward slashes:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\'__\'");
@@ -227,7 +228,8 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
+    assertTrue(resultAsString.contains(
+        "Index names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens or underscores:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\' @@@*%\'");
@@ -236,7 +238,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
 
     resultAsString = executeCommandAndLogResult(csb);
     assertTrue(resultAsString.contains(
-        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
+        "Index names may only be alphanumeric, must not begin with double-underscores, but can contain hyphens or underscores:"));
   }
 
   @Test
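
The change above replaces the flat validateCreateIndexCommandParams(...) helper with a per-parameter enum, one pattern and one message per parameter kind. A minimal standalone sketch of that shape follows; the regular expressions and error messages are taken from the diff, while the class name, the simplified null/empty handling and the main() driver are illustrative only and are not Geode's actual API.

  import java.util.regex.Pattern;

  // Standalone sketch of the enum-per-parameter validation introduced above.
  // The regexes and messages mirror the diff; everything else is illustrative.
  public class NameValidationSketch {

    public enum ValidateCommandParameters {
      REGION_PATH("[aA-zZ0-9-_./]+",
          "Region names may only be alphanumeric, must not begin with double-underscores,"
              + " but can contain hyphens, underscores, or forward slashes: "),
      INDEX_NAME("[aA-zZ0-9-_.]+",
          "Index names may only be alphanumeric, must not begin with double-underscores,"
              + " but can contain hyphens or underscores: ");

      private final Pattern pattern;
      private final String message;

      ValidateCommandParameters(String regex, String message) {
        this.pattern = Pattern.compile(regex);
        this.message = message;
      }

      public void validateName(String name) {
        if (name == null || name.isEmpty()) {
          throw new IllegalArgumentException("Name cannot be null or empty");
        }
        // Reject double-underscore prefixes and anything outside the allowed character set.
        if (name.startsWith("__") || !pattern.matcher(name).matches()) {
          throw new IllegalArgumentException(message + name);
        }
      }
    }

    public static void main(String[] args) {
      ValidateCommandParameters.INDEX_NAME.validateName("personIndex");  // accepted
      ValidateCommandParameters.REGION_PATH.validateName("/Person/sub"); // accepted
      ValidateCommandParameters.INDEX_NAME.validateName("__hidden");     // throws IllegalArgumentException
    }
  }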


[22/32] geode git commit: GEODE-2957: Create Lucene index analyzer help updated to include keyword DEFAULT

Posted by kl...@apache.org.
GEODE-2957: Create Lucene index analyzer help updated to include keyword DEFAULT


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/96665faf
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/96665faf
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/96665faf

Branch: refs/heads/feature/GEODE-1279
Commit: 96665fafbcc06948b7152ca9ad7344ab938f27ff
Parents: d50489c
Author: David Anuta <da...@gmail.com>
Authored: Tue May 30 09:27:11 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Tue May 30 12:02:37 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/96665faf/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
index db9f7b9..8104b3f 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
@@ -43,7 +43,7 @@ public class LuceneCliStrings {
       "Fields on the region values which are stored in the lucene index.\nUse __REGION_VALUE_FIELD if the entire region value should be indexed.\n__REGION_VALUE_FIELD is valid only if the region values are strings or numbers.";
   public static final String LUCENE_CREATE_INDEX__ANALYZER = "analyzer";
   public static final String LUCENE_CREATE_INDEX__ANALYZER_HELP =
-      "Type of the analyzer for each field.";
+      "Type of the analyzer for each field.\nUse the case sensitive keyword DEFAULT or leave an analyzer blank to use the default standard analyzer.";
   public static final String CREATE_INDEX__SUCCESS__MSG =
       "Index successfully created with following details";
   public static final String CREATE_INDEX__FAILURE__MSG =
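
Since the DEFAULT keyword is matched per field position, a create command along the following lines would use the standard analyzer for the first and third fields and a custom analyzer for the second. The --analyzer option name comes from the constant above; the other option names and the sample index, region, field and analyzer values are only assumed here for illustration.

  gfsh> create lucene index --name=personIndex --region=/Person --field=name,email,address --analyzer=DEFAULT,org.apache.lucene.analysis.core.KeywordAnalyzer,DEFAULT

Per the updated help text, leaving an analyzer entry blank has the same effect as spelling out the case-sensitive keyword DEFAULT for that field.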


[05/32] geode git commit: GEODE-2955: Validating region names when creating lucene index

Posted by kl...@apache.org.
GEODE-2955: Validating region names when creating lucene index

	This closes #531


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/ee9ca4e8
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/ee9ca4e8
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/ee9ca4e8

Branch: refs/heads/feature/GEODE-1279
Commit: ee9ca4e8176183a6d0128322d21c47e5c4a0ffdf
Parents: 662358f
Author: David Anuta <da...@gmail.com>
Authored: Wed May 24 11:34:39 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Wed May 24 15:25:36 2017 -0700

----------------------------------------------------------------------
 .../geode/internal/cache/LocalRegion.java       |  3 +--
 .../lucene/internal/LuceneServiceImpl.java      | 26 ++++++++++++++++++++
 .../internal/cli/LuceneIndexCommands.java       |  4 +++
 .../functions/LuceneCreateIndexFunction.java    |  4 +++
 .../cli/LuceneIndexCommandsDUnitTest.java       | 26 +++++++++++++++++++-
 5 files changed, 60 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/ee9ca4e8/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index 4446d48..f581856 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -231,8 +231,6 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
   // package-private to avoid synthetic accessor
   static final Logger logger = LogService.getLogger();
 
-  private static final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
-
   /**
    * Internal interface used to simulate failures when performing entry operations
    * 
@@ -7389,6 +7387,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
           "Region names may not begin with a double-underscore: " + name);
     }
 
+    final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
     // Ensure the region only contains valid characters
     Matcher matcher = NAME_PATTERN.matcher(name);
     if (!matcher.matches()) {

http://git-wip-us.apache.org/repos/asf/geode/blob/ee9ca4e8/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
index afbcc40..8ce7028 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
@@ -17,6 +17,8 @@ package org.apache.geode.cache.lucene.internal;
 
 import java.util.*;
 import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.geode.cache.lucene.LuceneIndexExistsException;
 import org.apache.geode.cache.lucene.internal.distributed.LuceneQueryFunction;
@@ -126,6 +128,30 @@ public class LuceneServiceImpl implements InternalLuceneService {
     return getUniqueIndexName(indexName, regionPath) + regionSuffix;
   }
 
+  public static void validateRegionName(String name) {
+    if (name == null) {
+      throw new IllegalArgumentException(
+          LocalizedStrings.LocalRegion_NAME_CANNOT_BE_NULL.toLocalizedString());
+    }
+    if (name.isEmpty()) {
+      throw new IllegalArgumentException(
+          LocalizedStrings.LocalRegion_NAME_CANNOT_BE_EMPTY.toLocalizedString());
+    }
+
+    if (name.startsWith("__")) {
+      throw new IllegalArgumentException(
+          "Region names may not begin with a double-underscore: " + name);
+    }
+
+    final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
+    // Ensure the region only contains valid characters
+    Matcher matcher = NAME_PATTERN.matcher(name);
+    if (!matcher.matches()) {
+      throw new IllegalArgumentException(
+          "Region names may only be alphanumeric and may contain hyphens or underscores: " + name);
+    }
+  }
+
   public void createIndex(String indexName, String regionPath,
       Map<String, Analyzer> fieldAnalyzers) {
     if (fieldAnalyzers == null || fieldAnalyzers.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/geode/blob/ee9ca4e8/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
index 5e17f6e..9317e2e 100755
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommands.java
@@ -35,6 +35,7 @@ import org.apache.geode.management.cli.CliMetaData;
 import org.apache.geode.management.cli.ConverterHint;
 import org.apache.geode.management.cli.Result;
 import org.apache.geode.management.internal.cli.CliUtil;
+import org.apache.geode.management.internal.cli.LogWrapper;
 import org.apache.geode.management.internal.cli.commands.AbstractCommandsSupport;
 import org.apache.geode.management.internal.cli.functions.CliFunctionResult;
 import org.apache.geode.management.internal.cli.i18n.CliStrings;
@@ -202,6 +203,9 @@ public class LuceneIndexCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(tabularResult);
+    } catch (IllegalArgumentException iae) {
+      LogWrapper.getInstance().info(iae.getMessage());
+      result = ResultBuilder.createUserErrorResult(iae.getMessage());
     } catch (CommandResultException crex) {
       result = crex.getResult();
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/geode/blob/ee9ca4e8/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
index 9f938a5..422b1ef 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
@@ -15,6 +15,8 @@
 
 package org.apache.geode.cache.lucene.internal.cli.functions;
 
+import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateRegionName;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
@@ -81,6 +83,8 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
           indexFactory.addField(fields[i], analyzer);
         }
       }
+
+      validateRegionName(indexInfo.getRegionPath());
       indexFactory.create(indexInfo.getIndexName(), indexInfo.getRegionPath());
 
       // TODO - update cluster configuration by returning a valid XmlEntity

http://git-wip-us.apache.org/repos/asf/geode/blob/ee9ca4e8/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
index bc4c68e..04359a3 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
@@ -198,6 +198,31 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Test
+  public void createIndexShouldNotAcceptEmptyRegionNames() {
+    final VM vm1 = Host.getHost(0).getVM(-1);
+    vm1.invoke(() -> {
+      getCache();
+    });
+
+    CommandStringBuilder csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, "\'__\'");
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+
+    String resultAsString = executeCommandAndLogResult(csb);
+    assertTrue(resultAsString.contains("Region names may not begin with a double-underscore:"));
+
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, "\' @@@*%\'");
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+
+    resultAsString = executeCommandAndLogResult(csb);
+    assertTrue(resultAsString
+        .contains("Region names may only be alphanumeric and may contain hyphens or underscores:"));
+  }
+
+  @Test
   public void createIndexShouldTrimAnalyzerNames() throws Exception {
     final VM vm1 = Host.getHost(0).getVM(-1);
     vm1.invoke(() -> {
@@ -497,7 +522,6 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
 
     TabularResultData data = (TabularResultData) executeCommandAndGetResult(csb).getResultData();
     assertEquals(4, data.retrieveAllValues("key").size());
-
   }
 
   @Test
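
In user-facing terms the new check turns a bad --region value into a user error result instead of a server-side exception. An interaction roughly like the one below is what the updated DUnit test drives; the prompt and option names are illustrative, while the message is the one asserted above.

  gfsh> create lucene index --name=personIndex --region=__Person --field=field1,field2,field3
  Region names may not begin with a double-underscore: __Person

Values containing other disallowed characters are rejected with the companion message, "Region names may only be alphanumeric and may contain hyphens or underscores:", which the same test also asserts.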


[14/32] geode git commit: GEODE-2977: make group/name option values consistent

Posted by kl...@apache.org.
GEODE-2977: make group/name option values consistent

* this closes #536


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c1ab3ffe
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c1ab3ffe
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c1ab3ffe

Branch: refs/heads/feature/GEODE-1279
Commit: c1ab3ffecb0c0a435fab64e94a97c2efc28ab085
Parents: 7b34cfd
Author: YehEmily <em...@gmail.com>
Authored: Tue May 23 12:27:40 2017 -0700
Committer: Jinmei Liao <ji...@pivotal.io>
Committed: Thu May 25 15:02:14 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache/AttributesFactory.java   |  17 +--
 .../geode/internal/cache/BucketRegion.java      |  22 ---
 .../apache/geode/internal/lang/StringUtils.java |   2 -
 .../geode/management/internal/cli/CliUtil.java  |  21 ---
 .../internal/cli/commands/ConfigCommands.java   |  12 +-
 .../CreateAlterDestroyRegionCommands.java       |   9 +-
 .../internal/cli/commands/DeployCommands.java   |  20 ++-
 .../cli/commands/DiskStoreCommands.java         |  18 ++-
 .../cli/commands/DurableClientCommands.java     |  56 ++++----
 .../internal/cli/commands/FunctionCommands.java |  13 +-
 .../internal/cli/commands/IndexCommands.java    |  56 ++++----
 .../cli/commands/MiscellaneousCommands.java     |  28 ++--
 .../internal/cli/commands/QueueCommands.java    |  18 ++-
 .../internal/cli/commands/RegionCommands.java   |  13 +-
 .../internal/cli/commands/WanCommands.java      | 143 ++++++++++++-------
 .../internal/cli/CliUtilDUnitTest.java          |  81 +++++------
 16 files changed, 240 insertions(+), 289 deletions(-)
----------------------------------------------------------------------
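
The recurring edit in the diff below is the same two-part change in each command: the group/member options are bound to String[] instead of a comma-separated String, and the removed CliUtil.findMembersOrThrow(...) call is replaced by CliUtil.findMembers(...) plus an explicit empty check that returns a user-error result. A rough standalone sketch of that shape follows; the types and messages stand in for Geode's DistributedMember, ResultBuilder and CliStrings.NO_MEMBERS_FOUND_MESSAGE and are not the real API.

  import java.util.Arrays;
  import java.util.LinkedHashSet;
  import java.util.Set;

  // Standalone sketch of the option-handling pattern applied throughout the diff below.
  public class MemberLookupSketch {

    // The real CliUtil.findMembers resolves groups/members against the cluster;
    // here the inputs are simply collected so the sketch runs on its own.
    static Set<String> findMembers(String[] groups, String[] members) {
      Set<String> matching = new LinkedHashSet<>();
      if (groups != null) {
        matching.addAll(Arrays.asList(groups));
      }
      if (members != null) {
        matching.addAll(Arrays.asList(members));
      }
      return matching;
    }

    // Shape of a command body after the change: no manual comma-splitting,
    // and an empty result becomes a user error instead of a thrown exception.
    static String listDeployed(String[] group) {
      Set<String> targetMembers = findMembers(group, null);
      if (targetMembers.isEmpty()) {
        return "No Members Found"; // analogous to CliStrings.NO_MEMBERS_FOUND_MESSAGE
      }
      return "collecting deployed jars from: " + targetMembers;
    }

    public static void main(String[] args) {
      // A gfsh converter would produce this array from --group=group1,group2
      System.out.println(listDeployed(new String[] {"group1", "group2"}));
      System.out.println(listDeployed(null));
    }
  }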


http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java b/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
index 69f1087..0d59279 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/AttributesFactory.java
@@ -1496,27 +1496,12 @@ public class AttributesFactory<K, V> {
             LocalizedStrings.AttributesFactory_TOTAL_SIZE_OF_PARTITION_REGION_MUST_BE_0
                 .toLocalizedString());
       }
-      // listeners are supported here as of v5.1
-      // if (attrs.getCacheListeners().length > 0) {
-      // throw new IllegalStateException(
-      // "Can not add cache listeners to RegionAttributes when PartitionAttributes are set.");
-      // }
-      // loaders are supported here as of v5.1
-      // if (attrs.getCacheLoader() != null) {
-      // throw new IllegalStateException(
-      // "Can not set CacheLoader in RegionAttributes when PartitionAttributes are set.");
-      // }
+
       if (!PartitionedRegionHelper.ALLOWED_DATA_POLICIES.contains(attrs.getDataPolicy())) {
         throw new IllegalStateException(
             LocalizedStrings.AttributesFactory_DATA_POLICIES_OTHER_THAN_0_ARE_NOT_ALLOWED_IN_PARTITIONED_REGIONS
                 .toLocalizedString(PartitionedRegionHelper.ALLOWED_DATA_POLICIES));
       }
-      // if ( attrs.getDataPolicy().isEmpty() && pa.getLocalMaxMemory() != 0) {
-      // throw new IllegalStateException(
-      // "A non-zero PartitionAttributes localMaxMemory setting is not compatible" +
-      // " with an empty DataPolicy. Please use DataPolicy.NORMAL instead.");
-      // }
-
       // fix bug #52033 by invoking getLocalMaxMemoryForValidation here
       if (((PartitionAttributesImpl) pa).getLocalMaxMemoryForValidation() < 0) {
         throw new IllegalStateException(

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 886d678..7bfffb7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -1848,28 +1848,6 @@ public class BucketRegion extends DistributedRegion implements Bucket {
     // if (!anyWithRouting) {
     Set failures = this.partitionedRegion.getDistributionManager().putOutgoing(prMsg);
 
-    // } else {
-    // // Send message to each member. We set a FilterRoutingInfo serialization
-    // // target so that serialization of the PutAllData objects held in the
-    // // message will only serialize the routing entry for the message recipient
-    // Iterator rIter = recipients.iterator();
-    // failures = new HashSet();
-    // while (rIter.hasNext()){
-    // InternalDistributedMember member = (InternalDistributedMember)rIter.next();
-    // FilterRoutingInfo.setSerializationTarget(member);
-    // try {
-    // prMsg.resetRecipients();
-    // prMsg.setRecipient(member);
-    // Set fs = this.partitionedRegion.getDistributionManager().putOutgoing(prMsg);
-    // if (fs != null && !fs.isEmpty()) {
-    // failures.addAll(fs);
-    // }
-    // } finally {
-    // FilterRoutingInfo.clearSerializationTarget();
-    // }
-    // }
-    // }
-
     return failures;
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java b/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
index 8a44564..298f44f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/lang/StringUtils.java
@@ -65,8 +65,6 @@ public class StringUtils extends org.apache.commons.lang.StringUtils {
     return buffer.toString();
   }
 
-
-
   /**
    * Gets the value of the specified Object as a String. If the Object is null then the first
    * non-null String value from the array of default String value is returned. If the array of

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
index c63b10b..038e069 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/CliUtil.java
@@ -281,27 +281,6 @@ public class CliUtil {
     return sb.toString();
   }
 
-  public static Set<DistributedMember> findMembersOrThrow(final String groups, final String members)
-      throws CommandResultException {
-
-    String[] groupsArray = (groups == null ? new String[] {} : groups.split(","));
-    String[] membersArray = (members == null ? new String[] {} : members.split(","));
-
-    return findMembersOrThrow(groupsArray, membersArray);
-  }
-
-  public static Set<DistributedMember> findMembersOrThrow(final String[] groups,
-      final String[] members) throws CommandResultException {
-
-    Set<DistributedMember> matchingMembers = findMembers(groups, members);
-    if (matchingMembers.isEmpty()) {
-      throw new CommandResultException(
-          ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE));
-    }
-
-    return matchingMembers;
-  }
-
   /**
    * Finds all Members (including both servers and locators) which belong to the given arrays of
    * groups or members.

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
index 6d3f50f..52a0a9d 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
@@ -199,18 +199,16 @@ public class ConfigCommands extends AbstractCommandsSupport {
   public Result exportConfig(
       @CliOption(key = {CliStrings.EXPORT_CONFIG__MEMBER},
           optionContext = ConverterHint.ALL_MEMBER_IDNAME,
-          help = CliStrings.EXPORT_CONFIG__MEMBER__HELP) String member,
+          help = CliStrings.EXPORT_CONFIG__MEMBER__HELP) String[] member,
       @CliOption(key = {CliStrings.EXPORT_CONFIG__GROUP}, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.EXPORT_CONFIG__GROUP__HELP) String group,
+          help = CliStrings.EXPORT_CONFIG__GROUP__HELP) String[] group,
       @CliOption(key = {CliStrings.EXPORT_CONFIG__DIR},
           help = CliStrings.EXPORT_CONFIG__DIR__HELP) String dir) {
     InfoResultData infoData = ResultBuilder.createInfoResultData();
 
-    Set<DistributedMember> targetMembers;
-    try {
-      targetMembers = CliUtil.findMembersOrThrow(group, member);
-    } catch (CommandResultException crex) {
-      return crex.getResult();
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(group, member);
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
     }
 
     try {

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
index b8ebc49..6e1a74e 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CreateAlterDestroyRegionCommands.java
@@ -522,11 +522,10 @@ public class CreateAlterDestroyRegionCommands extends AbstractCommandsSupport {
             new Object[] {evictionMax}));
       }
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> resultCollector =

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
index 4018beb..544a517 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DeployCommands.java
@@ -163,11 +163,10 @@ public class DeployCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc =
@@ -224,17 +223,16 @@ public class DeployCommands extends AbstractCommandsSupport {
   @CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_CONFIG})
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result listDeployed(@CliOption(key = {CliStrings.LIST_DEPLOYED__GROUP},
-      help = CliStrings.LIST_DEPLOYED__GROUP__HELP) String group) {
+      help = CliStrings.LIST_DEPLOYED__GROUP__HELP) String[] group) {
 
     try {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc =

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
index 4232d91..226cfaf 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommands.java
@@ -353,11 +353,10 @@ public class DiskStoreCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc = CliUtil.executeFunction(new CreateDiskStoreFunction(),
@@ -1410,11 +1409,10 @@ public class DiskStoreCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc = CliUtil.executeFunction(new DestroyDiskStoreFunction(),

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
index bcbfcf0..9cb87ac 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/DurableClientCommands.java
@@ -73,21 +73,21 @@ public class DurableClientCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.LIST_DURABLE_CQS__MEMBER,
           help = CliStrings.LIST_DURABLE_CQS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
 
       @CliOption(key = CliStrings.LIST_DURABLE_CQS__GROUP,
           help = CliStrings.LIST_DURABLE_CQS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
     Result result = null;
     try {
 
       boolean noResults = true;
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
+
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(new ListDurableCqNamesFunction(), durableClientId, targetMembers);
       final List<DurableCqNamesResult> results = (List<DurableCqNamesResult>) rc.getResult();
@@ -153,19 +153,19 @@ public class DurableClientCommands extends AbstractCommandsSupport {
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__DURABLE__CQ__NAME__HELP) final String cqName,
       @CliOption(key = CliStrings.COUNT_DURABLE_CQ_EVENTS__MEMBER, mandatory = false,
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
       @CliOption(key = CliStrings.COUNT_DURABLE_CQ_EVENTS__GROUP, mandatory = false,
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
 
     Result result = null;
     try {
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
+
       String[] params = new String[2];
       params[0] = durableClientId;
       params[1] = cqName;
@@ -200,19 +200,20 @@ public class DurableClientCommands extends AbstractCommandsSupport {
           help = CliStrings.CLOSE_DURABLE_CLIENTS__CLIENT__ID__HELP) final String durableClientId,
       @CliOption(key = CliStrings.CLOSE_DURABLE_CLIENTS__MEMBER, mandatory = false,
           help = CliStrings.CLOSE_DURABLE_CLIENTS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
       @CliOption(key = CliStrings.CLOSE_DURABLE_CLIENTS__GROUP, mandatory = false,
           help = CliStrings.COUNT_DURABLE_CQ_EVENTS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
 
     Result result = null;
     try {
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
+
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(new CloseDurableClientFunction(), durableClientId, targetMembers);
       final List<MemberResult> results = (List<MemberResult>) rc.getResult();
@@ -240,18 +241,17 @@ public class DurableClientCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CLOSE_DURABLE_CQS__MEMBER, mandatory = false,
           help = CliStrings.CLOSE_DURABLE_CQS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String memberNameOrId,
+          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
 
       @CliOption(key = CliStrings.CLOSE_DURABLE_CQS__GROUP, mandatory = false,
           help = CliStrings.CLOSE_DURABLE_CQS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String group) {
+          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
     Result result = null;
     try {
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException e) {
-        return e.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       String[] params = new String[2];

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
index 2007e4a..8ea65ff 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/FunctionCommands.java
@@ -560,19 +560,18 @@ public class FunctionCommands implements CommandMarker {
       @CliOption(key = CliStrings.LIST_FUNCTION__MATCHES,
           help = CliStrings.LIST_FUNCTION__MATCHES__HELP) String matches,
       @CliOption(key = CliStrings.LIST_FUNCTION__GROUP, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.LIST_FUNCTION__GROUP__HELP) String groups,
+          help = CliStrings.LIST_FUNCTION__GROUP__HELP) String[] groups,
       @CliOption(key = CliStrings.LIST_FUNCTION__MEMBER, optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.LIST_FUNCTION__MEMBER__HELP) String members) {
+          help = CliStrings.LIST_FUNCTION__MEMBER__HELP) String[] members) {
     TabularResultData tabularData = ResultBuilder.createTabularResultData();
     boolean accumulatedData = false;
 
     InternalCache cache = getCache();
 
-    Set<DistributedMember> targetMembers;
-    try {
-      targetMembers = CliUtil.findMembersOrThrow(groups, members);
-    } catch (CommandResultException crex) {
-      return crex.getResult();
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, members);
+
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
     }
 
     try {

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
index a4ba64c..407424a 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/IndexCommands.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.management.internal.cli.commands;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.geode.SystemFailure;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
@@ -188,7 +189,7 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CREATE_INDEX__MEMBER, mandatory = false,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.CREATE_INDEX__MEMBER__HELP) final String memberNameOrID,
+          help = CliStrings.CREATE_INDEX__MEMBER__HELP) final String[] memberNameOrID,
 
       @CliOption(key = CliStrings.CREATE_INDEX__TYPE, mandatory = false,
           unspecifiedDefaultValue = "range", optionContext = ConverterHint.INDEX_TYPE,
@@ -196,7 +197,7 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CREATE_INDEX__GROUP, mandatory = false,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.CREATE_INDEX__GROUP__HELP) final String group) {
+          help = CliStrings.CREATE_INDEX__GROUP__HELP) final String[] group) {
 
     Result result = null;
     AtomicReference<XmlEntity> xmlEntity = new AtomicReference<>();
@@ -237,8 +238,12 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       IndexInfo indexInfo = new IndexInfo(indexName, indexedExpression, regionPath, idxType);
 
-      final Set<DistributedMember> targetMembers =
-          CliUtil.findMembersOrThrow(group, memberNameOrID);
+      final Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrID);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(createIndexFunction, indexInfo, targetMembers);
 
@@ -309,16 +314,14 @@ public class IndexCommands extends AbstractCommandsSupport {
         }
         result = ResultBuilder.buildResult(erd);
       }
-    } catch (CommandResultException crex) {
-      result = crex.getResult();
     } catch (Exception e) {
       result = ResultBuilder.createGemFireErrorResult(e.getMessage());
     }
 
 
     if (xmlEntity.get() != null) {
-      persistClusterConfiguration(result, () -> getSharedConfiguration()
-          .addXmlEntity(xmlEntity.get(), group != null ? group.split(",") : null));
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().addXmlEntity(xmlEntity.get(), group));
     }
 
     return result;
@@ -337,16 +340,16 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.DESTROY_INDEX__MEMBER, mandatory = false,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.DESTROY_INDEX__MEMBER__HELP) final String memberNameOrID,
+          help = CliStrings.DESTROY_INDEX__MEMBER__HELP) final String[] memberNameOrID,
 
       @CliOption(key = CliStrings.DESTROY_INDEX__GROUP, mandatory = false,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.DESTROY_INDEX__GROUP__HELP) final String group) {
+          help = CliStrings.DESTROY_INDEX__GROUP__HELP) final String[] group) {
 
     Result result = null;
 
     if (StringUtils.isBlank(indexName) && StringUtils.isBlank(regionPath)
-        && StringUtils.isBlank(memberNameOrID) && StringUtils.isBlank(group)) {
+        && ArrayUtils.isEmpty(group) && ArrayUtils.isEmpty(memberNameOrID)) {
       return ResultBuilder.createUserErrorResult(
           CliStrings.format(CliStrings.PROVIDE_ATLEAST_ONE_OPTION, CliStrings.DESTROY_INDEX));
     }
@@ -364,12 +367,10 @@ public class IndexCommands extends AbstractCommandsSupport {
     }
 
     IndexInfo indexInfo = new IndexInfo(indexName, regionName);
-    Set<DistributedMember> targetMembers = null;
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrID);
 
-    try {
-      targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrID);
-    } catch (CommandResultException e) {
-      return e.getResult();
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
     }
 
     ResultCollector rc = CliUtil.executeFunction(destroyIndexFunction, indexInfo, targetMembers);
@@ -456,8 +457,8 @@ public class IndexCommands extends AbstractCommandsSupport {
       result = ResultBuilder.buildResult(erd);
     }
     if (xmlEntity.get() != null) {
-      persistClusterConfiguration(result, () -> getSharedConfiguration()
-          .deleteXmlEntity(xmlEntity.get(), group != null ? group.split(",") : null));
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().deleteXmlEntity(xmlEntity.get(), group));
     }
 
     return result;
@@ -539,11 +540,11 @@ public class IndexCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.CREATE_DEFINED_INDEXES__MEMBER, mandatory = false,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.CREATE_DEFINED_INDEXES__MEMBER__HELP) final String memberNameOrID,
+          help = CliStrings.CREATE_DEFINED_INDEXES__MEMBER__HELP) final String[] memberNameOrID,
 
       @CliOption(key = CliStrings.CREATE_DEFINED_INDEXES__GROUP, mandatory = false,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.CREATE_DEFINED_INDEXES__GROUP__HELP) final String group) {
+          help = CliStrings.CREATE_DEFINED_INDEXES__GROUP__HELP) final String[] group) {
 
     Result result = null;
     AtomicReference<XmlEntity> xmlEntity = new AtomicReference<>();
@@ -555,10 +556,13 @@ public class IndexCommands extends AbstractCommandsSupport {
     }
 
     try {
-      final Cache cache = CacheFactory.getAnyInstance();
+      final Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrID);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
-      final Set<DistributedMember> targetMembers =
-          CliUtil.findMembersOrThrow(group, memberNameOrID);
+      final Cache cache = CacheFactory.getAnyInstance();
       final ResultCollector<?, ?> rc =
           CliUtil.executeFunction(createDefinedIndexesFunction, indexDefinitions, targetMembers);
 
@@ -622,15 +626,13 @@ public class IndexCommands extends AbstractCommandsSupport {
         }
         result = ResultBuilder.buildResult(erd);
       }
-    } catch (CommandResultException crex) {
-      result = crex.getResult();
     } catch (Exception e) {
       result = ResultBuilder.createGemFireErrorResult(e.getMessage());
     }
 
     if (xmlEntity.get() != null) {
-      persistClusterConfiguration(result, () -> getSharedConfiguration()
-          .addXmlEntity(xmlEntity.get(), group != null ? group.split(",") : null));
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().addXmlEntity(xmlEntity.get(), group));
     }
     return result;
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
index 0d714f4..a23afd0 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/MiscellaneousCommands.java
@@ -672,11 +672,11 @@ public class MiscellaneousCommands implements CommandMarker {
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result exportStackTrace(@CliOption(key = CliStrings.EXPORT_STACKTRACE__MEMBER,
       optionContext = ConverterHint.ALL_MEMBER_IDNAME,
-      help = CliStrings.EXPORT_STACKTRACE__HELP) String memberNameOrId,
+      help = CliStrings.EXPORT_STACKTRACE__HELP) String[] memberNameOrId,
 
       @CliOption(key = CliStrings.EXPORT_STACKTRACE__GROUP,
           optionContext = ConverterHint.ALL_MEMBER_IDNAME,
-          help = CliStrings.EXPORT_STACKTRACE__GROUP) String group,
+          help = CliStrings.EXPORT_STACKTRACE__GROUP) String[] group,
 
       @CliOption(key = CliStrings.EXPORT_STACKTRACE__FILE,
           help = CliStrings.EXPORT_STACKTRACE__FILE__HELP) String fileName,
@@ -687,29 +687,27 @@ public class MiscellaneousCommands implements CommandMarker {
 
     Result result = null;
     StringBuffer filePrefix = new StringBuffer("stacktrace");
+
+    if (fileName == null) {
+      fileName = filePrefix.append("_").append(System.currentTimeMillis()).toString();
+    }
+    final File outFile = new File(fileName);
     try {
-      if (fileName == null) {
-        fileName = filePrefix.append("_").append(System.currentTimeMillis()).toString();
-      }
-      final File outFile = new File(fileName);
       if (outFile.exists() && failIfFilePresent) {
         return ResultBuilder.createShellClientErrorResult(CliStrings.format(
             CliStrings.EXPORT_STACKTRACE__ERROR__FILE__PRESENT, outFile.getCanonicalPath()));
       }
 
+
       InternalCache cache = getCache();
       InternalDistributedSystem ads = cache.getInternalDistributedSystem();
 
       InfoResultData resultData = ResultBuilder.createInfoResultData();
 
       Map<String, byte[]> dumps = new HashMap<String, byte[]>();
-      Set<DistributedMember> targetMembers = null;
-
-      if ((group == null || group.isEmpty())
-          && (memberNameOrId == null || memberNameOrId.isEmpty())) {
-        targetMembers = CliUtil.getAllMembers(cache);
-      } else {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc =
@@ -728,9 +726,7 @@ public class MiscellaneousCommands implements CommandMarker {
       resultData.addLine(CliStrings.EXPORT_STACKTRACE__HOST + ads.getDistributedMember().getHost());
 
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      return crex.getResult();
-    } catch (Exception ex) {
+    } catch (IOException ex) {
       result = ResultBuilder
           .createGemFireErrorResult(CliStrings.EXPORT_STACKTRACE__ERROR + ex.getMessage());
     }

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
index 6208adb..f4dee75 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/QueueCommands.java
@@ -123,11 +123,10 @@ public class QueueCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(groups, null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(groups, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       AsyncEventQueueFunctionArgs aeqArgs = new AsyncEventQueueFunctionArgs(id, parallel,
@@ -188,11 +187,10 @@ public class QueueCommands extends AbstractCommandsSupport {
       TabularResultData tabularData = ResultBuilder.createTabularResultData();
       boolean accumulatedData = false;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow((String) null, (String) null);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(null, null);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       ResultCollector<?, ?> rc = CliUtil.executeFunction(new ListAsyncEventQueuesFunction(),

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
index 561d4b8..6f5b047 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/RegionCommands.java
@@ -80,19 +80,18 @@ public class RegionCommands implements CommandMarker {
   @ResourceOperation(resource = Resource.DATA, operation = Operation.READ)
   public Result listRegion(
       @CliOption(key = {CliStrings.LIST_REGION__GROUP}, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.LIST_REGION__GROUP__HELP) String group,
+          help = CliStrings.LIST_REGION__GROUP__HELP) String[] group,
       @CliOption(key = {CliStrings.LIST_REGION__MEMBER}, optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.LIST_REGION__MEMBER__HELP) String memberNameOrId) {
+          help = CliStrings.LIST_REGION__MEMBER__HELP) String[] memberNameOrId) {
     Result result = null;
     try {
       Set<RegionInformation> regionInfoSet = new LinkedHashSet<RegionInformation>();
       ResultCollector<?, ?> rc = null;
 
-      Set<DistributedMember> targetMembers;
-      try {
-        targetMembers = CliUtil.findMembersOrThrow(group, memberNameOrId);
-      } catch (CommandResultException crex) {
-        return crex.getResult();
+      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+      if (targetMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
       }
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
index feeb353..57080ba 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/WanCommands.java
@@ -78,7 +78,7 @@ public class WanCommands extends AbstractCommandsSupport {
       @CliOption(key = CliStrings.CREATE_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
           unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.CREATE_GATEWAYSENDER__MEMBER__HELP) String onMember,
+          help = CliStrings.CREATE_GATEWAYSENDER__MEMBER__HELP) String[] onMember,
 
       @CliOption(key = CliStrings.CREATE_GATEWAYSENDER__ID, mandatory = true,
           help = CliStrings.CREATE_GATEWAYSENDER__ID__HELP) String id,
@@ -145,7 +145,11 @@ public class WanCommands extends AbstractCommandsSupport {
           gatewayEventFilters, gatewayTransportFilter);
 
       Set<DistributedMember> membersToCreateGatewaySenderOn =
-          CliUtil.findMembersOrThrow(onGroups, onMember == null ? null : onMember.split(","));
+          CliUtil.findMembers(onGroups, onMember);
+
+      if (membersToCreateGatewaySenderOn.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ResultCollector<?, ?> resultCollector =
           CliUtil.executeFunction(GatewaySenderCreateFunction.INSTANCE, gatewaySenderFunctionArgs,
@@ -170,8 +174,6 @@ public class WanCommands extends AbstractCommandsSupport {
     } catch (IllegalArgumentException e) {
       LogWrapper.getInstance().info(e.getMessage());
       result = ResultBuilder.createUserErrorResult(e.getMessage());
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     }
 
     if (xmlEntity.get() != null) {
@@ -190,11 +192,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.START_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.START_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.START_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.START_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.START_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.START_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     final String id = senderId.trim();
@@ -205,7 +207,12 @@ public class WanCommands extends AbstractCommandsSupport {
           (SystemManagementService) ManagementService.getExistingManagementService(cache);
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ExecutorService execService = Executors.newCachedThreadPool(new ThreadFactory() {
         AtomicInteger threadNum = new AtomicInteger();
@@ -293,8 +300,6 @@ public class WanCommands extends AbstractCommandsSupport {
       }
       execService.shutdown();
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -312,11 +317,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.PAUSE_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.PAUSE_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.PAUSE_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.PAUSE_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.PAUSE_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.PAUSE_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null) {
@@ -331,9 +336,13 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewaySenderMXBean bean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = null;
 
-      dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
           bean = service.getLocalGatewaySenderMXBean(senderId);
@@ -365,8 +374,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -383,10 +390,10 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.RESUME_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.RESUME_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.RESUME_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
       @CliOption(key = CliStrings.RESUME_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.RESUME_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.RESUME_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null) {
@@ -402,7 +409,12 @@ public class WanCommands extends AbstractCommandsSupport {
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
 
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
           bean = service.getLocalGatewaySenderMXBean(senderId);
@@ -434,8 +446,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -452,11 +462,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.STOP_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.STOP_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.STOP_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STOP_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STOP_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STOP_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null)
@@ -470,7 +480,12 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewaySenderMXBean bean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
@@ -498,8 +513,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(resultData);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -518,7 +531,7 @@ public class WanCommands extends AbstractCommandsSupport {
       @CliOption(key = CliStrings.CREATE_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
           unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.CREATE_GATEWAYRECEIVER__MEMBER__HELP) String onMember,
+          help = CliStrings.CREATE_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember,
 
       @CliOption(key = CliStrings.CREATE_GATEWAYRECEIVER__MANUALSTART,
           help = CliStrings.CREATE_GATEWAYRECEIVER__MANUALSTART__HELP) Boolean manualStart,
@@ -550,7 +563,11 @@ public class WanCommands extends AbstractCommandsSupport {
               socketBufferSize, maximumTimeBetweenPings, gatewayTransportFilters);
 
       Set<DistributedMember> membersToCreateGatewayReceiverOn =
-          CliUtil.findMembersOrThrow(onGroups, onMember == null ? null : onMember.split(","));
+          CliUtil.findMembers(onGroups, onMember);
+
+      if (membersToCreateGatewayReceiverOn.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ResultCollector<?, ?> resultCollector =
           CliUtil.executeFunction(GatewayReceiverCreateFunction.INSTANCE,
@@ -576,8 +593,6 @@ public class WanCommands extends AbstractCommandsSupport {
     } catch (IllegalArgumentException e) {
       LogWrapper.getInstance().info(e.getMessage());
       result = ResultBuilder.createUserErrorResult(e.getMessage());
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     }
 
     if (xmlEntity.get() != null) {
@@ -653,11 +668,11 @@ public class WanCommands extends AbstractCommandsSupport {
   @ResourceOperation(resource = Resource.DATA, operation = Operation.MANAGE)
   public Result startGatewayReceiver(@CliOption(key = CliStrings.START_GATEWAYRECEIVER__GROUP,
       optionContext = ConverterHint.MEMBERGROUP,
-      help = CliStrings.START_GATEWAYRECEIVER__GROUP__HELP) String onGroup,
+      help = CliStrings.START_GATEWAYRECEIVER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.START_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.START_GATEWAYRECEIVER__MEMBER__HELP) String onMember) {
+          help = CliStrings.START_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember) {
     Result result = null;
 
     try {
@@ -668,7 +683,12 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewayReceiverMXBean receieverBean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         ObjectName gatewayReceiverObjectName = MBeanJMXAdapter.getGatewayReceiverMBeanName(member);
@@ -715,11 +735,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.STOP_GATEWAYRECEIVER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.STOP_GATEWAYRECEIVER__GROUP__HELP) String onGroup,
+          help = CliStrings.STOP_GATEWAYRECEIVER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STOP_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STOP_GATEWAYRECEIVER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STOP_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
 
@@ -731,7 +751,12 @@ public class WanCommands extends AbstractCommandsSupport {
       GatewayReceiverMXBean receieverBean = null;
 
       TabularResultData resultData = ResultBuilder.createTabularResultData();
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         ObjectName gatewayReceiverObjectName = MBeanJMXAdapter.getGatewayReceiverMBeanName(member);
@@ -776,9 +801,9 @@ public class WanCommands extends AbstractCommandsSupport {
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result listGateway(
       @CliOption(key = CliStrings.LIST_GATEWAY__MEMBER, optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.LIST_GATEWAY__MEMBER__HELP) String onMember,
+          help = CliStrings.LIST_GATEWAY__MEMBER__HELP) String[] onMember,
       @CliOption(key = CliStrings.LIST_GATEWAY__GROUP, optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.LIST_GATEWAY__GROUP__HELP) String onGroup) {
+          help = CliStrings.LIST_GATEWAY__GROUP__HELP) String[] onGroup) {
 
     Result result = null;
     InternalCache cache = getCache();
@@ -786,7 +811,11 @@ public class WanCommands extends AbstractCommandsSupport {
       SystemManagementService service =
           (SystemManagementService) ManagementService.getExistingManagementService(cache);
 
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       Map<String, Map<String, GatewaySenderMXBean>> gatewaySenderBeans =
           new TreeMap<String, Map<String, GatewaySenderMXBean>>();
@@ -838,8 +867,6 @@ public class WanCommands extends AbstractCommandsSupport {
       crd.setHeader(CliStrings.HEADER_GATEWAYS);
       accumulateListGatewayResult(crd, gatewaySenderBeans, gatewayReceiverBeans);
       result = ResultBuilder.buildResult(crd);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -856,11 +883,11 @@ public class WanCommands extends AbstractCommandsSupport {
 
       @CliOption(key = CliStrings.STATUS_GATEWAYSENDER__GROUP,
           optionContext = ConverterHint.MEMBERGROUP,
-          help = CliStrings.STATUS_GATEWAYSENDER__GROUP__HELP) String onGroup,
+          help = CliStrings.STATUS_GATEWAYSENDER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STATUS_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STATUS_GATEWAYSENDER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STATUS_GATEWAYSENDER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
     if (senderId != null)
@@ -881,8 +908,12 @@ public class WanCommands extends AbstractCommandsSupport {
           crd.addSection(CliStrings.SECTION_GATEWAY_SENDER_NOT_AVAILABLE)
               .addTable(CliStrings.TABLE_GATEWAY_SENDER);
 
-      Set<DistributedMember> dsMembers = null;
-      dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
+
       for (DistributedMember member : dsMembers) {
         if (cache.getDistributedSystem().getDistributedMember().getId().equals(member.getId())) {
           bean = service.getLocalGatewaySenderMXBean(senderId);
@@ -897,8 +928,6 @@ public class WanCommands extends AbstractCommandsSupport {
         }
       }
       result = ResultBuilder.buildResult(crd);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -912,11 +941,11 @@ public class WanCommands extends AbstractCommandsSupport {
   @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
   public Result statusGatewayReceiver(@CliOption(key = CliStrings.STATUS_GATEWAYRECEIVER__GROUP,
       optionContext = ConverterHint.MEMBERGROUP,
-      help = CliStrings.STATUS_GATEWAYRECEIVER__GROUP__HELP) String onGroup,
+      help = CliStrings.STATUS_GATEWAYRECEIVER__GROUP__HELP) String[] onGroup,
 
       @CliOption(key = CliStrings.STATUS_GATEWAYRECEIVER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
-          help = CliStrings.STATUS_GATEWAYRECEIVER__MEMBER__HELP) String onMember) {
+          help = CliStrings.STATUS_GATEWAYRECEIVER__MEMBER__HELP) String[] onMember) {
 
     Result result = null;
 
@@ -934,7 +963,11 @@ public class WanCommands extends AbstractCommandsSupport {
           crd.addSection(CliStrings.SECTION_GATEWAY_RECEIVER_NOT_AVAILABLE)
               .addTable(CliStrings.TABLE_GATEWAY_RECEIVER);
 
-      Set<DistributedMember> dsMembers = CliUtil.findMembersOrThrow(onGroup, onMember);
+      Set<DistributedMember> dsMembers = CliUtil.findMembers(onGroup, onMember);
+
+      if (dsMembers.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       for (DistributedMember member : dsMembers) {
         ObjectName gatewayReceiverObjectName = MBeanJMXAdapter.getGatewayReceiverMBeanName(member);
@@ -949,8 +982,6 @@ public class WanCommands extends AbstractCommandsSupport {
         buildReceiverStatus(member.getId(), null, notAvailableReceiverData);
       }
       result = ResultBuilder.buildResult(crd);
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     } catch (Exception e) {
       LogWrapper.getInstance().warning(CliStrings.GATEWAY_ERROR + CliUtil.stackTraceAsString(e));
       result = ResultBuilder.createGemFireErrorResult(CliStrings.GATEWAY_ERROR + e.getMessage());
@@ -969,7 +1000,7 @@ public class WanCommands extends AbstractCommandsSupport {
       @CliOption(key = CliStrings.DESTROY_GATEWAYSENDER__MEMBER,
           optionContext = ConverterHint.MEMBERIDNAME,
           unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
-          help = CliStrings.DESTROY_GATEWAYSENDER__MEMBER__HELP) String onMember,
+          help = CliStrings.DESTROY_GATEWAYSENDER__MEMBER__HELP) String[] onMember,
       @CliOption(key = CliStrings.DESTROY_GATEWAYSENDER__ID, mandatory = true,
           optionContext = ConverterHint.GATEWAY_SENDER_ID,
           help = CliStrings.DESTROY_GATEWAYSENDER__ID__HELP) String id) {
@@ -979,7 +1010,11 @@ public class WanCommands extends AbstractCommandsSupport {
           new GatewaySenderDestroyFunctionArgs(id);
 
       Set<DistributedMember> membersToDestroyGatewaySenderOn =
-          CliUtil.findMembersOrThrow(onGroups, onMember == null ? null : onMember.split(","));
+          CliUtil.findMembers(onGroups, onMember);
+
+      if (membersToDestroyGatewaySenderOn.isEmpty()) {
+        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+      }
 
       ResultCollector<?, ?> resultCollector =
           CliUtil.executeFunction(GatewaySenderDestroyFunction.INSTANCE,
@@ -1000,8 +1035,6 @@ public class WanCommands extends AbstractCommandsSupport {
     } catch (IllegalArgumentException e) {
       LogWrapper.getInstance().info(e.getMessage());
       result = ResultBuilder.createUserErrorResult(e.getMessage());
-    } catch (CommandResultException crex) {
-      result = handleCommandResultException(crex);
     }
     return result;
   }

http://git-wip-us.apache.org/repos/asf/geode/blob/c1ab3ffe/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java b/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
index d610ca0..489be28 100644
--- a/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/management/internal/cli/CliUtilDUnitTest.java
@@ -261,35 +261,30 @@ public class CliUtilDUnitTest extends JUnit4CacheTestCase {
   }
 
   public void verifyFindAllMatchingMembers() {
-    try {
-      Set<DistributedMember> set = CliUtil.findMembersOrThrow(GROUP1, null);
-      assertNotNull(set);
-      assertEquals(2, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
-
-      set = CliUtil.findMembersOrThrow("group1,group2", null);
-      assertNotNull(set);
-      assertEquals(4, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP2));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
-
-      set = CliUtil.findMembersOrThrow(null, MEMBER_1_GROUP1);
-      assertNotNull(set);
-      assertEquals(1, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-
-      set = CliUtil.findMembersOrThrow(null, "member1_group1,member2_group2");
-      assertNotNull(set);
-      assertEquals(2, set.size());
-      assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
-      assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
-
-    } catch (CommandResultException e) {
-      Assert.fail("CliUtil failed with exception", e);
-    }
+    Set<DistributedMember> set = CliUtil.findMembers(GROUP1.split(","), null);
+    assertNotNull(set);
+    assertEquals(2, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
+
+    set = CliUtil.findMembers(new String[] {"group1", "group2"}, null);
+    assertNotNull(set);
+    assertEquals(4, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP2));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
+
+    set = CliUtil.findMembers(null, MEMBER_1_GROUP1.split(","));
+    assertNotNull(set);
+    assertEquals(1, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+
+    set = CliUtil.findMembers(null, new String[] {"member1_group1", "member2_group2"});
+    assertNotNull(set);
+    assertEquals(2, set.size());
+    assertEquals(true, containsMember(set, MEMBER_1_GROUP1));
+    assertEquals(true, containsMember(set, MEMBER_2_GROUP2));
   }
 
   private Object containsMember(Set<DistributedMember> set, String string) {
@@ -311,22 +306,18 @@ public class CliUtilDUnitTest extends JUnit4CacheTestCase {
   public void verifyExecuteFunction() {
     DunitFunction function = new DunitFunction("myfunction");
     Set<DistributedMember> set;
-    try {
-      @SuppressWarnings("rawtypes")
-      Region region1 = getCache().getRegion(COMMON_REGION);
-      region1.clear();
-      set = CliUtil.findMembersOrThrow(GROUP1, null);
-      assertEquals(2, set.size());
-      ResultCollector collector = CliUtil.executeFunction(function, "executeOnGroup", set);
-      collector.getResult();
-      assertEquals(2, region1.size());
-      assertTrue(region1.containsKey(MEMBER_1_GROUP1));
-      assertTrue(region1.containsKey(MEMBER_2_GROUP1));
-      assertEquals("executeOnGroup", region1.get(MEMBER_1_GROUP1));
-      assertEquals("executeOnGroup", region1.get(MEMBER_2_GROUP1));
-    } catch (CommandResultException e) {
-      Assert.fail("Error during querying members", e);
-    }
+    @SuppressWarnings("rawtypes")
+    Region region1 = getCache().getRegion(COMMON_REGION);
+    region1.clear();
+    set = CliUtil.findMembers(GROUP1.split(","), null);
+    assertEquals(2, set.size());
+    ResultCollector collector = CliUtil.executeFunction(function, "executeOnGroup", set);
+    collector.getResult();
+    assertEquals(2, region1.size());
+    assertTrue(region1.containsKey(MEMBER_1_GROUP1));
+    assertTrue(region1.containsKey(MEMBER_2_GROUP1));
+    assertEquals("executeOnGroup", region1.get(MEMBER_1_GROUP1));
+    assertEquals("executeOnGroup", region1.get(MEMBER_2_GROUP1));
   }
 
   public void getRegionAssociatedMembers() {
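
Taken together, the CliUtil changes above follow one recurring pattern: the throwing lookup
CliUtil.findMembersOrThrow(...) is replaced by CliUtil.findMembers(...) plus an explicit
empty-set check. A minimal sketch of that pattern (illustrative only, not a literal excerpt
from any single command):

    Set<DistributedMember> targetMembers = CliUtil.findMembers(onGroup, onMember);
    if (targetMembers.isEmpty()) {
      // Surface a user error directly instead of catching CommandResultException
      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
    }
    // ... execute the command's function against targetMembers ...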


[27/32] geode git commit: GEODE-2974: Remove GemFire product name from message string

Posted by kl...@apache.org.
GEODE-2974: Remove GemFire product name from message string

The error message now reports "Could not process command due to error."
followed by the description of the error.
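
As a hedged illustration of the effect (not part of this commit; the try/catch shape is
assumed from the command classes touched elsewhere on this branch), a command that falls
back to ResultBuilder.createGemFireErrorResult(...) now yields the shorter prefix:

    try {
      // ... run the command ...
    } catch (Exception e) {
      // Before: "Could not process command due to GemFire error. <message>"
      // After:  "Could not process command due to error. <message>"
      return ResultBuilder.createGemFireErrorResult(e.getMessage());
    }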


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/2fb73815
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/2fb73815
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/2fb73815

Branch: refs/heads/feature/GEODE-1279
Commit: 2fb738157ac8b5c8e2e2fab03e3e87874f8e5bc3
Parents: c08d70d
Author: Ken Howe <kh...@pivotal.io>
Authored: Wed May 24 13:34:37 2017 -0700
Committer: Ken Howe <kh...@pivotal.io>
Committed: Tue May 30 15:15:40 2017 -0700

----------------------------------------------------------------------
 .../internal/cli/result/ResultBuilder.java          | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/2fb73815/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/ResultBuilder.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/ResultBuilder.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/ResultBuilder.java
index 6332540..94dbca0 100755
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/ResultBuilder.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/result/ResultBuilder.java
@@ -22,9 +22,7 @@ import java.util.Collection;
 import java.util.List;
 
 /**
- * 
- * 
- * @since GemFire 7.0
+ * Provides methods for creating {@link Result} objects to return from Gfsh command functions
  */
 public class ResultBuilder {
   public static final int CODE_SHELLCLIENT_ABORT_OP = 110;
@@ -38,7 +36,7 @@ public class ResultBuilder {
 
   // errors on member
   public static final int ERRORCODE_PARSING_ERROR = 501;
-  public static final int ERRORCODE_GEMFIRE_ERROR = 505;
+  public static final int ERRORCODE_GEODE_ERROR = 505;
   public static final int ERRORCODE_BADRESPONSE_ERROR = 510;
   public static final int ERRORCODE_BADCONFIG_ERROR = 515;
   public static final int ERRORCODE_USER_ERROR = 520;
@@ -88,17 +86,17 @@ public class ResultBuilder {
   }
 
   /**
-   * Method for convenience to create error result for error in GemFire while executing command.
+   * Method for convenience to create error result for error in Geode while executing command.
    * <p/>
    * Note: To build your own error result, use {@link #createErrorResultData()} to build
    * {@link ErrorResultData} & then use {@link #buildResult(ResultData)}
    * 
    * @param message Message to be shown to the user
-   * @return Result for error in GemFire while executing command.
+   * @return Result for error in Geode while executing command.
    */
   public static Result createGemFireErrorResult(String message) {
-    return createErrorResult(ERRORCODE_GEMFIRE_ERROR,
-        "Could not process command due to GemFire error. " + message);
+    return createErrorResult(ERRORCODE_GEODE_ERROR,
+        "Could not process command due to error. " + message);
   }
 
   public static Result createGemFireUnAuthorizedErrorResult(String message) {
@@ -268,7 +266,7 @@ public class ResultBuilder {
       while (result.hasNextLine()) {
         builder.append(result.nextLine());
       }
-      // TODO - Abhishek - what to do with incoming files??
+      // TODO - what to do with incoming files??
     }
 
     return builder.toString();


[24/32] geode git commit: GEODE-2957: Create Lucene index analyzer help updated to include keyword DEFAULT

Posted by kl...@apache.org.
GEODE-2957: Create Lucene index analyzer help updated to include keyword DEFAULT

	This closes #547


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/1ee19c75
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/1ee19c75
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/1ee19c75

Branch: refs/heads/feature/GEODE-1279
Commit: 1ee19c759c4bdd4133f7dcf7df648f3437234a7d
Parents: 5b2cdf8
Author: David Anuta <da...@gmail.com>
Authored: Tue May 30 09:27:11 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Tue May 30 12:16:12 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/1ee19c75/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
index db9f7b9..8104b3f 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/LuceneCliStrings.java
@@ -43,7 +43,7 @@ public class LuceneCliStrings {
       "Fields on the region values which are stored in the lucene index.\nUse __REGION_VALUE_FIELD if the entire region value should be indexed.\n__REGION_VALUE_FIELD is valid only if the region values are strings or numbers.";
   public static final String LUCENE_CREATE_INDEX__ANALYZER = "analyzer";
   public static final String LUCENE_CREATE_INDEX__ANALYZER_HELP =
-      "Type of the analyzer for each field.";
+      "Type of the analyzer for each field.\nUse the case sensitive keyword DEFAULT or leave an analyzer blank to use the default standard analyzer.";
   public static final String CREATE_INDEX__SUCCESS__MSG =
       "Index successfully created with following details";
   public static final String CREATE_INDEX__FAILURE__MSG =
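
For illustration only (the index, region, field, and analyzer values below are made up;
the option names are those of the gfsh create lucene index command), the new help text
corresponds to usage such as:

    gfsh> create lucene index --name=personIndex --region=/Person --field=name,email --analyzer=DEFAULT,org.apache.lucene.analysis.standard.StandardAnalyzer

Here DEFAULT requests the default standard analyzer for the name field, while the email
field names an analyzer class explicitly.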


[25/32] geode git commit: GEODE-2941 Update Pulse documentation: logging info

Posted by kl...@apache.org.
GEODE-2941 Update Pulse documentation: logging info


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/fa808acd
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/fa808acd
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/fa808acd

Branch: refs/heads/feature/GEODE-1279
Commit: fa808acd41caa9cb76b1a5c66dd304925f31c0a2
Parents: 1ee19c7
Author: Dave Barnes <db...@pivotal.io>
Authored: Tue May 30 12:14:58 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Tue May 30 13:41:43 2017 -0700

----------------------------------------------------------------------
 geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/fa808acd/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
index af9b1f5..4ce25e9 100644
--- a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
+++ b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
@@ -94,6 +94,11 @@ of your Geode installation directory. Depending on your application server, you
 
     Substitute the appropriate paths and passwords for the bracketed placeholders.
 
+6.  To change Pulse logging behavior, edit `log4j2.xml` in the same directory.
+    The default configuration sets the logging level to `INFO` and specifies a message pattern that
+    includes the date, name of the reporting logger, error level, name of the reporting thread, and the error message.
+    Refer to the [Apache Log4j documentation](https://logging.apache.org/log4j/2.x/manual/configuration.html#XML) for details on how to specify `log4j2.xml` content and syntax.
+
 7.  Restart the Web application server.
 
 8.  Access the Pulse application using the address, port, and application URL that you configured in
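
Purely as a sketch of the file being described (not part of this commit; the appender name
and pattern below are assumptions, not the shipped defaults), a log4j2.xml of this general
shape controls the logging level and message pattern mentioned above:

    <Configuration>
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <!-- date, logger name, level, thread, message -->
          <PatternLayout pattern="%d %c %-5p [%t] %m%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="INFO">
          <AppenderRef ref="Console"/>
        </Root>
      </Loggers>
    </Configuration>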


[32/32] geode git commit: 1279: rename tests with old bug system numbers

Posted by kl...@apache.org.
1279: rename tests with old bug system numbers

* Bug34387DUnitTest -> CreateAndLocalDestroyInTXRegressionTest
* Bug35214DUnitTest -> EntriesDoNotExpireDuringGIIRegressionTest
* Bug38013DUnitTest -> RemotePRValuesAreNotDeserializedRegressionTest
* Bug34948DUnitTest -> ValuesAreLazilyDeserializedRegressionTest


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/64404037
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/64404037
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/64404037

Branch: refs/heads/feature/GEODE-1279
Commit: 6440403727f8f584ae62ca74e1224da971b6ca11
Parents: 76ff506
Author: Kirk Lund <kl...@apache.org>
Authored: Wed May 24 13:09:11 2017 -0700
Committer: Kirk Lund <kl...@apache.org>
Committed: Wed May 31 10:00:50 2017 -0700

----------------------------------------------------------------------
 .../apache/geode/cache30/Bug34387DUnitTest.java | 188 ----------------
 .../apache/geode/cache30/Bug34948DUnitTest.java | 157 -------------
 .../apache/geode/cache30/Bug35214DUnitTest.java | 220 -------------------
 .../apache/geode/cache30/Bug38013DUnitTest.java | 150 -------------
 ...CreateAndLocalDestroyInTXRegressionTest.java | 165 ++++++++++++++
 ...triesDoNotExpireDuringGIIRegressionTest.java | 206 +++++++++++++++++
 ...RValuesAreNotDeserializedRegressionTest.java | 160 ++++++++++++++
 ...luesAreLazilyDeserializedRegressionTest.java | 165 ++++++++++++++
 .../cache/ConnectDisconnectDUnitTest.java       | 148 +++++--------
 .../dunit/internal/DistributedTestFixture.java  |  16 +-
 .../internal/JUnit3DistributedTestCase.java     |  62 ++----
 .../internal/JUnit4DistributedTestCase.java     | 123 ++++-------
 .../SerializableErrorCollector.java             |  22 ++
 13 files changed, 845 insertions(+), 937 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/Bug34387DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/Bug34387DUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache30/Bug34387DUnitTest.java
deleted file mode 100644
index d43be83..0000000
--- a/geode-core/src/test/java/org/apache/geode/cache30/Bug34387DUnitTest.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.cache30;
-
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-
-import org.apache.geode.cache.AttributesFactory;
-import org.apache.geode.cache.CacheException;
-import org.apache.geode.cache.CacheListener;
-import org.apache.geode.cache.CacheTransactionManager;
-import org.apache.geode.cache.DataPolicy;
-import org.apache.geode.cache.EntryEvent;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.Scope;
-import org.apache.geode.cache.UnsupportedOperationInTransactionException;
-import org.apache.geode.cache.util.CacheListenerAdapter;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.internal.i18n.LocalizedStrings;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.VM;
-
-/**
- * Test create + localDestroy for bug 34387
- *
- * @since GemFire 5.0
- */
-@Category(DistributedTest.class)
-public class Bug34387DUnitTest extends JUnit4CacheTestCase {
-
-  // private transient Region r;
-  // private transient DistributedMember otherId;
-  protected transient int invokeCount;
-
-  static volatile boolean callbackFailure;
-
-  public Bug34387DUnitTest() {
-    super();
-  }
-
-  protected static void callbackAssertEquals(String message, Object expected, Object actual) {
-    if (expected == null && actual == null)
-      return;
-    if (expected != null && expected.equals(actual))
-      return;
-    callbackFailure = true;
-    // Throws an error that is ignored, but...
-    assertEquals(message, expected, actual);
-  }
-
-
-  private VM getOtherVm() {
-    Host host = Host.getHost(0);
-    return host.getVM(0);
-  }
-
-  private void initOtherId() {
-    VM vm = getOtherVm();
-    vm.invoke(new CacheSerializableRunnable("Connect") {
-      public void run2() throws CacheException {
-        getCache();
-      }
-    });
-    vm.invoke(() -> Bug34387DUnitTest.getVMDistributedMember());
-  }
-
-  private void doCommitOtherVm(final boolean doDestroy) {
-    VM vm = getOtherVm();
-    vm.invoke(new CacheSerializableRunnable("create root") {
-      public void run2() throws CacheException {
-        AttributesFactory af = new AttributesFactory();
-        af.setScope(Scope.DISTRIBUTED_ACK);
-        af.setConcurrencyChecksEnabled(true);
-        Region r1 = createRootRegion("r1", af.create());
-        CacheTransactionManager ctm = getCache().getCacheTransactionManager();
-        ctm.begin();
-        r1.create("createKey", "createValue");
-        if (doDestroy) {
-          try {
-            r1.localDestroy("createKey");
-            fail("expected exception not thrown");
-          } catch (UnsupportedOperationInTransactionException e) {
-            assertEquals(e.getMessage(),
-                LocalizedStrings.TXStateStub_LOCAL_DESTROY_NOT_ALLOWED_IN_TRANSACTION
-                    .toLocalizedString());
-          }
-        } else {
-          try {
-            r1.localInvalidate("createKey");
-            fail("expected exception not thrown");
-          } catch (UnsupportedOperationInTransactionException e) {
-            assertEquals(e.getMessage(),
-                LocalizedStrings.TXStateStub_LOCAL_INVALIDATE_NOT_ALLOWED_IN_TRANSACTION
-                    .toLocalizedString());
-          }
-        }
-        ctm.commit();
-      }
-    });
-  }
-
-  public static DistributedMember getVMDistributedMember() {
-    return InternalDistributedSystem.getAnyInstance().getDistributedMember();
-  }
-
-  ////////////////////// Test Methods //////////////////////
-
-  /**
-   * test create followed by localDestroy
-   */
-  @Test
-  public void testCreateAndLD() throws CacheException {
-    initOtherId();
-    AttributesFactory af = new AttributesFactory();
-    af.setDataPolicy(DataPolicy.REPLICATE);
-    af.setScope(Scope.DISTRIBUTED_ACK);
-    af.setConcurrencyChecksEnabled(true);
-    callbackFailure = false;
-
-    CacheListener cl1 = new CacheListenerAdapter() {
-      public void afterCreate(EntryEvent e) {
-        callbackAssertEquals("Keys not equal", "createKey", e.getKey());
-        callbackAssertEquals("Values not equal", "createValue", e.getNewValue());
-        Bug34387DUnitTest.this.invokeCount++;
-      }
-    };
-    af.addCacheListener(cl1);
-    Region r1 = createRootRegion("r1", af.create());
-
-    this.invokeCount = 0;
-    assertNull(r1.getEntry("createKey"));
-    doCommitOtherVm(true);
-    assertNotNull(r1.getEntry("createKey"));
-    assertEquals("createValue", r1.getEntry("createKey").getValue());
-    assertEquals(1, this.invokeCount);
-    assertFalse("Errors in callbacks; check logs for details", callbackFailure);
-  }
-
-  /**
-   * test create followed by localInvalidate
-   */
-  @Test
-  public void testCreateAndLI() throws CacheException {
-    initOtherId();
-    AttributesFactory af = new AttributesFactory();
-    af.setDataPolicy(DataPolicy.REPLICATE);
-    af.setScope(Scope.DISTRIBUTED_ACK);
-    af.setConcurrencyChecksEnabled(true);
-    callbackFailure = false;
-
-    CacheListener cl1 = new CacheListenerAdapter() {
-      public void afterCreate(EntryEvent e) {
-        callbackAssertEquals("key not equal", "createKey", e.getKey());
-        callbackAssertEquals("value not equal", "createValue", e.getNewValue());
-        Bug34387DUnitTest.this.invokeCount++;
-      }
-    };
-    af.addCacheListener(cl1);
-    Region r1 = createRootRegion("r1", af.create());
-
-    this.invokeCount = 0;
-    assertNull(r1.getEntry("createKey"));
-    doCommitOtherVm(false);
-    assertNotNull(r1.getEntry("createKey"));
-    assertEquals("createValue", r1.getEntry("createKey").getValue());
-    assertEquals(1, this.invokeCount);
-    assertFalse("Errors in callbacks; check logs for details", callbackFailure);
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/Bug34948DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/Bug34948DUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache30/Bug34948DUnitTest.java
deleted file mode 100644
index 8b98cd3..0000000
--- a/geode-core/src/test/java/org/apache/geode/cache30/Bug34948DUnitTest.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.cache30;
-
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.geode.DataSerializable;
-import org.apache.geode.DataSerializer;
-import org.apache.geode.cache.AttributesFactory;
-import org.apache.geode.cache.CacheException;
-import org.apache.geode.cache.CacheListener;
-import org.apache.geode.cache.DataPolicy;
-import org.apache.geode.cache.EntryEvent;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.Scope;
-import org.apache.geode.cache.util.CacheListenerAdapter;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.VM;
-
-/**
- * Test to make sure cache values are lazily deserialized
- *
- * @since GemFire 5.0
- */
-@Category(DistributedTest.class)
-public class Bug34948DUnitTest extends JUnit4CacheTestCase {
-
-  public Bug34948DUnitTest() {
-    super();
-  }
-
-  ////////////////////// Test Methods //////////////////////
-
-  private VM getOtherVm() {
-    Host host = Host.getHost(0);
-    return host.getVM(0);
-  }
-
-  static protected Object lastCallback = null;
-
-  private void doCreateOtherVm() {
-    VM vm = getOtherVm();
-    vm.invoke(new CacheSerializableRunnable("create root") {
-      public void run2() throws CacheException {
-        getSystem();
-        AttributesFactory af = new AttributesFactory();
-        af.setScope(Scope.DISTRIBUTED_ACK);
-        af.setDataPolicy(DataPolicy.PRELOADED);
-        CacheListener cl = new CacheListenerAdapter() {
-          public void afterCreate(EntryEvent event) {
-            // getLogWriter().info("afterCreate " + event.getKey());
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-
-          public void afterUpdate(EntryEvent event) {
-            // getLogWriter().info("afterUpdate " + event.getKey());
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-
-          public void afterInvalidate(EntryEvent event) {
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-
-          public void afterDestroy(EntryEvent event) {
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-        };
-        af.setCacheListener(cl);
-        createRootRegion("bug34948", af.create());
-      }
-    });
-  }
-
-  /**
-   * Make sure that value is only deserialized in cache whose application asks for the value.
-   */
-  @Test
-  public void testBug34948() throws CacheException {
-    final AttributesFactory factory = new AttributesFactory();
-    factory.setScope(Scope.DISTRIBUTED_ACK);
-    factory.setDataPolicy(DataPolicy.PRELOADED);
-    final Region r = createRootRegion("bug34948", factory.create());
-
-    // before gii
-    r.put("key1", new HomeBoy());
-
-    doCreateOtherVm();
-
-    // after gii
-    r.put("key2", new HomeBoy());
-
-    r.localDestroy("key1");
-    r.localDestroy("key2");
-
-    Object o = r.get("key1");
-    assertTrue(r.get("key1") instanceof HomeBoy);
-    assertTrue(r.get("key2") == null); // preload will not distribute
-
-    // @todo darrel: add putAll test once it does not deserialize
-  }
-
-  public static class HomeBoy implements DataSerializable {
-    public HomeBoy() {}
-
-    public void toData(DataOutput out) throws IOException {
-      DistributedMember me = InternalDistributedSystem.getAnyInstance().getDistributedMember();
-      DataSerializer.writeObject(me, out);
-    }
-
-    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-      DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
-      DistributedMember me = ds.getDistributedMember();
-      DistributedMember hb = (DistributedMember) DataSerializer.readObject(in);
-      if (me.equals(hb)) {
-        ds.getLogWriter().info("HomeBoy was deserialized on his home");
-      } else {
-        String msg = "HomeBoy was deserialized on " + me + " instead of his home " + hb;
-        ds.getLogWriter().error(msg);
-        throw new IllegalStateException(msg);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/Bug35214DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/Bug35214DUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache30/Bug35214DUnitTest.java
deleted file mode 100644
index ed25b26..0000000
--- a/geode-core/src/test/java/org/apache/geode/cache30/Bug35214DUnitTest.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.cache30;
-
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-
-import org.apache.geode.SystemFailure;
-import org.apache.geode.cache.AttributesFactory;
-import org.apache.geode.cache.CacheException;
-import org.apache.geode.cache.CacheListener;
-import org.apache.geode.cache.DataPolicy;
-import org.apache.geode.cache.EntryEvent;
-import org.apache.geode.cache.ExpirationAction;
-import org.apache.geode.cache.ExpirationAttributes;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.RegionEvent;
-import org.apache.geode.cache.Scope;
-import org.apache.geode.cache.util.CacheListenerAdapter;
-import org.apache.geode.internal.cache.LocalRegion;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.LogWriterUtils;
-import org.apache.geode.test.dunit.ThreadUtils;
-import org.apache.geode.test.dunit.VM;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
-
-/**
- * Make sure entry expiration does not happen during gii for bug 35214
- *
- * @since GemFire 5.0
- */
-@Category(DistributedTest.class)
-public class Bug35214DUnitTest extends JUnit4CacheTestCase {
-
-  protected volatile int expirationCount = 0;
-
-  private final static int ENTRY_COUNT = 100;
-
-  protected static volatile boolean callbackFailure;
-
-  public Bug35214DUnitTest() {
-    super();
-  }
-
-  private VM getOtherVm() {
-    Host host = Host.getHost(0);
-    return host.getVM(0);
-  }
-
-  private void initOtherVm() {
-    VM vm = getOtherVm();
-    vm.invoke(new CacheSerializableRunnable("init") {
-      public void run2() throws CacheException {
-        getCache();
-        AttributesFactory af = new AttributesFactory();
-        af.setScope(Scope.DISTRIBUTED_ACK);
-        Region r1 = createRootRegion("r1", af.create());
-        for (int i = 1; i <= ENTRY_COUNT; i++) {
-          r1.put("key" + i, "value" + i);
-        }
-      }
-    });
-  }
-
-  private AsyncInvocation updateOtherVm() throws Throwable {
-    VM vm = getOtherVm();
-    AsyncInvocation otherUpdater = vm.invokeAsync(new CacheSerializableRunnable("update") {
-      public void run2() throws CacheException {
-        Region r1 = getRootRegion("r1");
-        // let the main guys gii get started; we want to do updates
-        // during his gii
-        {
-          // wait for profile of getInitialImage cache to show up
-          org.apache.geode.internal.cache.CacheDistributionAdvisor adv =
-              ((org.apache.geode.internal.cache.DistributedRegion) r1)
-                  .getCacheDistributionAdvisor();
-          int numProfiles;
-          int expectedProfiles = 1;
-          for (;;) {
-            numProfiles = adv.adviseInitialImage(null).getReplicates().size();
-            if (numProfiles < expectedProfiles) {
-              // getLogWriter().info("PROFILE CHECK: Found " + numProfiles +
-              // " getInitialImage Profiles (waiting for " + expectedProfiles + ")");
-              // pause(5);
-            } else {
-              LogWriterUtils.getLogWriter()
-                  .info("PROFILE CHECK: Found " + numProfiles + " getInitialImage Profiles (OK)");
-              break;
-            }
-          }
-        }
-        // start doing updates of the keys to see if we can get deadlocked
-        int updateCount = 1;
-        do {
-          for (int i = 1; i <= ENTRY_COUNT; i++) {
-            String key = "key" + i;
-            if (r1.containsKey(key)) {
-              r1.destroy(key);
-            } else {
-              r1.put(key, "value" + i + "uc" + updateCount);
-            }
-          }
-        } while (updateCount++ < 20);
-        // do one more loop with no destroys
-        for (int i = 1; i <= ENTRY_COUNT; i++) {
-          String key = "key" + i;
-          if (!r1.containsKey(key)) {
-            r1.put(key, "value" + i + "uc" + updateCount);
-          }
-        }
-      }
-    });
-
-    // FIXME this thread does not terminate
-    // DistributedTestCase.join(otherUpdater, 5 * 60 * 1000, getLogWriter());
-    // if(otherUpdater.exceptionOccurred()){
-    // fail("otherUpdater failed", otherUpdater.getException());
-    // }
-
-    return otherUpdater;
-  }
-
-  ////////////////////// Test Methods //////////////////////
-
-  protected boolean afterRegionCreateSeen = false;
-
-  protected static void callbackAssertTrue(String msg, boolean cond) {
-    if (cond)
-      return;
-    callbackFailure = true;
-    // Throws ignored error, but...
-    assertTrue(msg, cond);
-  }
-
-
-  /**
-   * make sure entries do not expire during a GII
-   */
-  @Test
-  public void testNoEntryExpireDuringGII() throws Exception {
-    initOtherVm();
-    AsyncInvocation updater = null;
-    try {
-      updater = updateOtherVm();
-    } catch (VirtualMachineError e) {
-      SystemFailure.initiateFailure(e);
-      throw e;
-    } catch (Throwable e1) {
-      Assert.fail("failed due to " + e1, e1);
-    }
-    System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
-    org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 30;
-    callbackFailure = false;
-
-    try {
-      AttributesFactory af = new AttributesFactory();
-      af.setDataPolicy(DataPolicy.REPLICATE);
-      af.setScope(Scope.DISTRIBUTED_ACK);
-      af.setStatisticsEnabled(true);
-      af.setEntryIdleTimeout(new ExpirationAttributes(1, ExpirationAction.INVALIDATE));
-      CacheListener cl1 = new CacheListenerAdapter() {
-        public void afterRegionCreate(RegionEvent re) {
-          afterRegionCreateSeen = true;
-        }
-
-        public void afterInvalidate(EntryEvent e) {
-          callbackAssertTrue("afterregionCreate not seen", afterRegionCreateSeen);
-          // make sure region is initialized
-          callbackAssertTrue("not initialized", ((LocalRegion) e.getRegion()).isInitialized());
-          expirationCount++;
-          org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
-        }
-      };
-      af.addCacheListener(cl1);
-      final Region r1 = createRootRegion("r1", af.create());
-      ThreadUtils.join(updater, 60 * 1000);
-      WaitCriterion ev = new WaitCriterion() {
-        public boolean done() {
-          return r1.values().size() == 0;
-        }
-
-        public String description() {
-          return "region never became empty";
-        }
-      };
-      Wait.waitForCriterion(ev, 2 * 1000, 200, true);
-      {
-        assertEquals(0, r1.values().size());
-        assertEquals(ENTRY_COUNT, r1.keySet().size());
-      }
-
-    } finally {
-      org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
-      System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
-      assertEquals(null, System.getProperty(LocalRegion.EXPIRY_MS_PROPERTY));
-    }
-    assertFalse("Errors in callbacks; check logs for details", callbackFailure);
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/Bug38013DUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/Bug38013DUnitTest.java b/geode-core/src/test/java/org/apache/geode/cache30/Bug38013DUnitTest.java
deleted file mode 100644
index a0e8021..0000000
--- a/geode-core/src/test/java/org/apache/geode/cache30/Bug38013DUnitTest.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.cache30;
-
-import org.junit.experimental.categories.Category;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
-import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase;
-import org.apache.geode.test.junit.categories.DistributedTest;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.geode.DataSerializable;
-import org.apache.geode.DataSerializer;
-import org.apache.geode.cache.AttributesFactory;
-import org.apache.geode.cache.CacheException;
-import org.apache.geode.cache.CacheListener;
-import org.apache.geode.cache.EntryEvent;
-import org.apache.geode.cache.PartitionAttributesFactory;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.util.CacheListenerAdapter;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.VM;
-
-/**
- * Test to make sure PR cache values are lazily deserialized
- *
- * @since GemFire 5.0
- */
-@Category(DistributedTest.class)
-public class Bug38013DUnitTest extends JUnit4CacheTestCase {
-
-  public Bug38013DUnitTest() {
-    super();
-  }
-
-  ////////////////////// Test Methods //////////////////////
-
-  private VM getOtherVm() {
-    Host host = Host.getHost(0);
-    return host.getVM(0);
-  }
-
-  static protected Object lastCallback = null;
-
-  private void doCreateOtherVm() {
-    VM vm = getOtherVm();
-    vm.invoke(new CacheSerializableRunnable("create root") {
-      public void run2() throws CacheException {
-        getSystem();
-        AttributesFactory af = new AttributesFactory();
-        CacheListener cl = new CacheListenerAdapter() {
-          public void afterCreate(EntryEvent event) {
-            // getLogWriter().info("afterCreate " + event.getKey());
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-
-          public void afterUpdate(EntryEvent event) {
-            // getLogWriter().info("afterUpdate " + event.getKey());
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-
-          public void afterInvalidate(EntryEvent event) {
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-
-          public void afterDestroy(EntryEvent event) {
-            if (event.getCallbackArgument() != null) {
-              lastCallback = event.getCallbackArgument();
-            }
-          }
-        };
-        af.setCacheListener(cl);
-        // create a pr with a data store
-        PartitionAttributesFactory paf = new PartitionAttributesFactory();
-        paf.setRedundantCopies(0);
-        // use defaults so this is a data store
-        af.setPartitionAttributes(paf.create());
-        createRootRegion("bug38013", af.create());
-      }
-    });
-  }
-
-  /**
-   * Make sure that value is only deserialized in cache whose application asks for the value.
-   */
-  @Test
-  public void testBug38013() throws CacheException {
-    final AttributesFactory factory = new AttributesFactory();
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
-    paf.setRedundantCopies(0);
-    paf.setLocalMaxMemory(0); // make it an accessor
-    factory.setPartitionAttributes(paf.create());
-    final Region r = createRootRegion("bug38013", factory.create());
-
-    doCreateOtherVm();
-
-    r.put("key1", new HomeBoy());
-
-    assertTrue(r.get("key1") instanceof HomeBoy);
-  }
-
-  public static class HomeBoy implements DataSerializable {
-    public HomeBoy() {}
-
-    public void toData(DataOutput out) throws IOException {
-      DistributedMember me = InternalDistributedSystem.getAnyInstance().getDistributedMember();
-      DataSerializer.writeObject(me, out);
-    }
-
-    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
-      DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
-      DistributedMember me = ds.getDistributedMember();
-      DistributedMember hb = (DistributedMember) DataSerializer.readObject(in);
-      if (me.equals(hb)) {
-        ds.getLogWriter().info("HomeBoy was deserialized on his home");
-      } else {
-        String msg = "HomeBoy was deserialized on " + me + " instead of his home " + hb;
-        ds.getLogWriter().error(msg);
-        throw new IllegalStateException(msg);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/CreateAndLocalDestroyInTXRegressionTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/CreateAndLocalDestroyInTXRegressionTest.java b/geode-core/src/test/java/org/apache/geode/cache30/CreateAndLocalDestroyInTXRegressionTest.java
new file mode 100644
index 0000000..b978b85
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/cache30/CreateAndLocalDestroyInTXRegressionTest.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.cache30;
+
+import static org.apache.geode.internal.i18n.LocalizedStrings.TXStateStub_LOCAL_DESTROY_NOT_ALLOWED_IN_TRANSACTION;
+import static org.apache.geode.internal.i18n.LocalizedStrings.TXStateStub_LOCAL_INVALIDATE_NOT_ALLOWED_IN_TRANSACTION;
+import static org.hamcrest.core.IsEqual.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.CacheListener;
+import org.apache.geode.cache.CacheTransactionManager;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.Scope;
+import org.apache.geode.cache.UnsupportedOperationInTransactionException;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableErrorCollector;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test create + localDestroy for bug 34387
+ *
+ * #34387: TX in Proxy Regions with create followed by localDestroy on same key results in remote
+ * VMs receiving create events with null getNewValue().
+ *
+ * Create and LocalDestroy/LocalInvalidate should create event with NewValue
+ *
+ * @since GemFire 5.0
+ */
+@Category(DistributedTest.class)
+public class CreateAndLocalDestroyInTXRegressionTest extends JUnit4CacheTestCase {
+
+  private static final String REGION_NAME = "r1";
+
+  private int invokeCount;
+  private VM otherVM;
+  private transient Region region;
+
+  @Rule
+  public SerializableErrorCollector errorCollector = new SerializableErrorCollector();
+
+  @Before
+  public void setUp() throws Exception {
+    this.invokeCount = 0;
+    this.otherVM = Host.getHost(0).getVM(0);
+
+    initOtherVM(this.otherVM);
+    AttributesFactory af = new AttributesFactory();
+    af.setDataPolicy(DataPolicy.REPLICATE);
+    af.setScope(Scope.DISTRIBUTED_ACK);
+    af.setConcurrencyChecksEnabled(true);
+
+    CacheListener cl1 = new CacheListenerAdapter() {
+      @Override
+      public void afterCreate(EntryEvent e) {
+        errorCollector.checkThat("Keys not equal", "createKey", equalTo(e.getKey()));
+        errorCollector.checkThat("Values not equal", "createValue", equalTo(e.getNewValue()));
+        CreateAndLocalDestroyInTXRegressionTest.this.invokeCount++;
+      }
+    };
+
+    af.addCacheListener(cl1);
+    this.region = createRootRegion(REGION_NAME, af.create());
+
+    assertNull(this.region.getEntry("createKey"));
+  }
+
+  /**
+   * test create followed by localDestroy
+   */
+  @Test
+  public void createAndLocalDestroyShouldCreateEventWithNewValue() throws CacheException {
+    doCommitInOtherVm(otherVM, true);
+
+    assertNotNull(this.region.getEntry("createKey"));
+    assertEquals("createValue", this.region.getEntry("createKey").getValue());
+    assertEquals(1, this.invokeCount);
+  }
+
+  /**
+   * test create followed by localInvalidate
+   */
+  @Test
+  public void createAndLocalInvalidateShouldCreateEventWithNewValue() throws CacheException {
+    doCommitInOtherVm(this.otherVM, false);
+
+    assertNotNull(this.region.getEntry("createKey"));
+    assertEquals("createValue", this.region.getEntry("createKey").getValue());
+    assertEquals(1, this.invokeCount);
+  }
+
+  private void initOtherVM(VM otherVM) {
+    otherVM.invoke(new CacheSerializableRunnable("Connect") {
+      @Override
+      public void run2() throws CacheException {
+        getCache();
+      }
+    });
+  }
+
+  private void doCommitInOtherVm(VM otherVM, boolean doDestroy) {
+    otherVM.invoke(new CacheSerializableRunnable("create root") {
+      @Override
+      public void run2() throws CacheException {
+        AttributesFactory factory = new AttributesFactory();
+        factory.setScope(Scope.DISTRIBUTED_ACK);
+        factory.setConcurrencyChecksEnabled(true);
+
+        Region region = createRootRegion(REGION_NAME, factory.create());
+
+        CacheTransactionManager transactionManager = getCache().getCacheTransactionManager();
+        transactionManager.begin();
+
+        region.create("createKey", "createValue");
+
+        if (doDestroy) {
+          try {
+            region.localDestroy("createKey");
+            fail("expected exception not thrown");
+          } catch (UnsupportedOperationInTransactionException e) {
+            assertEquals(TXStateStub_LOCAL_DESTROY_NOT_ALLOWED_IN_TRANSACTION.toLocalizedString(),
+                e.getMessage());
+          }
+        } else {
+          try {
+            region.localInvalidate("createKey");
+            fail("expected exception not thrown");
+          } catch (UnsupportedOperationInTransactionException e) {
+            assertEquals(
+                TXStateStub_LOCAL_INVALIDATE_NOT_ALLOWED_IN_TRANSACTION.toLocalizedString(),
+                e.getMessage());
+          }
+        }
+
+        transactionManager.commit();
+      }
+    });
+  }
+
+}
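
For readers following the new test: the try/fail/catch blocks above verify that localDestroy and localInvalidate are rejected inside a transaction with the expected localized message. A minimal sketch of the same expectation written with AssertJ (which other tests in this patch already import) could look like the following; it assumes the region and transactionManager variables set up in doCommitInOtherVm and is illustrative only, not part of the commit.

// Illustrative sketch only (not part of the commit): the same expectation with AssertJ.
// Assumes `region` and `transactionManager` are set up as in doCommitInOtherVm above.
import static org.assertj.core.api.Assertions.assertThatThrownBy;

transactionManager.begin();
region.create("createKey", "createValue");

assertThatThrownBy(() -> region.localDestroy("createKey"))
    .isInstanceOf(UnsupportedOperationInTransactionException.class)
    .hasMessage(TXStateStub_LOCAL_DESTROY_NOT_ALLOWED_IN_TRANSACTION.toLocalizedString());

transactionManager.commit();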

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/EntriesDoNotExpireDuringGIIRegressionTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/EntriesDoNotExpireDuringGIIRegressionTest.java b/geode-core/src/test/java/org/apache/geode/cache30/EntriesDoNotExpireDuringGIIRegressionTest.java
new file mode 100644
index 0000000..3b78844
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/cache30/EntriesDoNotExpireDuringGIIRegressionTest.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.cache30;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.hamcrest.core.Is.is;
+
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.CacheListener;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.ExpirationAction;
+import org.apache.geode.cache.ExpirationAttributes;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.Scope;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.internal.cache.CacheDistributionAdvisor;
+import org.apache.geode.internal.cache.DistributedRegion;
+import org.apache.geode.internal.cache.InitialImageOperation;
+import org.apache.geode.internal.cache.LocalRegion;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.dunit.rules.DistributedRestoreSystemProperties;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.serializable.SerializableErrorCollector;
+import org.awaitility.Awaitility;
+import org.awaitility.core.ConditionFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Make sure entry expiration does not happen during gii for bug 35214
+ *
+ * #35214: hang during getInitialImage due to entry expiration
+ *
+ * Entries should not expire during GII
+ *
+ * @since GemFire 5.0
+ */
+@Category(DistributedTest.class)
+public class EntriesDoNotExpireDuringGIIRegressionTest extends JUnit4CacheTestCase {
+
+  private static final int ENTRY_COUNT = 100;
+  private static final String REGION_NAME = "r1";
+
+  // TODO: value of expirationCount is not validated
+  private AtomicInteger expirationCount;
+  private AtomicBoolean afterRegionCreateInvoked;
+  private VM otherVM;
+
+  @Rule
+  public DistributedRestoreSystemProperties restoreSystemProperties =
+      new DistributedRestoreSystemProperties();
+
+  @Rule
+  public SerializableErrorCollector errorCollector = new SerializableErrorCollector();
+
+  @Before
+  public void before() throws Exception {
+    this.expirationCount = new AtomicInteger(0);
+    this.afterRegionCreateInvoked = new AtomicBoolean(false);
+    this.otherVM = Host.getHost(0).getVM(0);
+    initOtherVm(this.otherVM);
+
+    System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
+    InitialImageOperation.slowImageProcessing = 30;
+  }
+
+  @After
+  public void after() throws Exception {
+    InitialImageOperation.slowImageProcessing = 0;
+  }
+
+  /**
+   * make sure entries do not expire during a GII
+   */
+  @Test
+  public void entriesShouldNotExpireDuringGII() throws Exception {
+    AsyncInvocation updater = updateOtherVm(this.otherVM);
+
+    AttributesFactory factory = new AttributesFactory();
+    factory.setDataPolicy(DataPolicy.REPLICATE);
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setStatisticsEnabled(true);
+    factory.setEntryIdleTimeout(new ExpirationAttributes(1, ExpirationAction.INVALIDATE));
+    factory.addCacheListener(createCacheListener());
+
+    Region region = createRootRegion(REGION_NAME, factory.create());
+
+    updater.await();
+
+    await().until(() -> region.values().size() == 0);
+
+    assertThat(region.values().size()).isEqualTo(0);
+    assertThat(region.keySet().size()).isEqualTo(ENTRY_COUNT);
+  }
+
+  private void initOtherVm(VM otherVM) {
+    otherVM.invoke(new CacheSerializableRunnable("init") {
+      @Override
+      public void run2() throws CacheException {
+        getCache();
+
+        AttributesFactory factory = new AttributesFactory();
+        factory.setScope(Scope.DISTRIBUTED_ACK);
+
+        Region region = createRootRegion(REGION_NAME, factory.create());
+
+        for (int i = 1; i <= ENTRY_COUNT; i++) {
+          region.put("key" + i, "value" + i);
+        }
+      }
+    });
+  }
+
+  private AsyncInvocation updateOtherVm(VM otherVM) {
+    return otherVM.invokeAsync(new CacheSerializableRunnable("update") {
+      @Override
+      public void run2() throws CacheException {
+        Region region = getRootRegion(REGION_NAME);
+        // let the main VM's GII get started; we want to do updates during its GII
+
+        // wait for profile of getInitialImage cache to show up
+        CacheDistributionAdvisor advisor =
+            ((DistributedRegion) region).getCacheDistributionAdvisor();
+        int expectedProfiles = 1;
+        await().until(
+            () -> assertThat(numberProfiles(advisor)).isGreaterThanOrEqualTo(expectedProfiles));
+
+        // start doing updates of the keys to see if we can get deadlocked
+        int updateCount = 1;
+        do {
+          for (int i = 1; i <= ENTRY_COUNT; i++) {
+            String key = "key" + i;
+            if (region.containsKey(key)) {
+              region.destroy(key);
+            } else {
+              region.put(key, "value" + i + "uc" + updateCount);
+            }
+          }
+        } while (updateCount++ < 20);
+
+        // do one more loop with no destroys
+        for (int i = 1; i <= ENTRY_COUNT; i++) {
+          String key = "key" + i;
+          if (!region.containsKey(key)) {
+            region.put(key, "value" + i + "uc" + updateCount);
+          }
+        }
+      }
+    });
+  }
+
+  private int numberProfiles(CacheDistributionAdvisor advisor) {
+    return advisor.adviseInitialImage(null).getReplicates().size();
+  }
+
+  private CacheListener createCacheListener() {
+    return new CacheListenerAdapter() {
+      @Override
+      public void afterRegionCreate(RegionEvent event) {
+        afterRegionCreateInvoked.set(true);
+      }
+
+      @Override
+      public void afterInvalidate(EntryEvent event) {
+        errorCollector.checkThat("afterRegionCreate should have been seen",
+            afterRegionCreateInvoked.get(), is(true));
+        errorCollector.checkThat("Region should have been initialized",
+            ((LocalRegion) event.getRegion()).isInitialized(), is(true));
+
+        expirationCount.incrementAndGet();
+
+        InitialImageOperation.slowImageProcessing = 0;
+      }
+    };
+  }
+
+  private ConditionFactory await() {
+    return Awaitility.await().atMost(2, MINUTES);
+  }
+
+}
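
The rewritten test replaces the old WaitCriterion/Wait.waitForCriterion loop with Awaitility, wrapped in the await() helper at the bottom of the file. In isolation, the polling idiom it relies on looks roughly like the sketch below; the Region variable is assumed to be in scope, and the poll interval is an illustrative choice that the commit itself does not set.

// Poll until the region has been fully invalidated, failing after the stated timeout.
import java.util.concurrent.TimeUnit;
import org.awaitility.Awaitility;

Awaitility.await()
    .atMost(2, TimeUnit.MINUTES)                  // upper bound, matching the test's await() helper
    .pollInterval(200, TimeUnit.MILLISECONDS)     // poll interval chosen here for illustration
    .until(() -> region.values().isEmpty());      // condition checked on each poll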

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/RemotePRValuesAreNotDeserializedRegressionTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/RemotePRValuesAreNotDeserializedRegressionTest.java b/geode-core/src/test/java/org/apache/geode/cache30/RemotePRValuesAreNotDeserializedRegressionTest.java
new file mode 100644
index 0000000..b8594df
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/cache30/RemotePRValuesAreNotDeserializedRegressionTest.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.cache30;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.geode.DataSerializable;
+import org.apache.geode.DataSerializer;
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.CacheListener;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.PartitionAttributesFactory;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.DistributedSystem;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Test to make sure PR cache values are lazily deserialized
+ *
+ * #38013: PR regions do deserialization on remote bucket during get causing NoClassDefFoundError
+ *
+ * Remote PartitionedRegion values should not be deserialized
+ *
+ * @since GemFire 5.0
+ */
+@Category(DistributedTest.class)
+public class RemotePRValuesAreNotDeserializedRegressionTest extends JUnit4CacheTestCase {
+
+  private static final String REGION_NAME = "bug38013";
+
+  // TODO: value of lastCallback is not validated
+  private static Object lastCallback = null;
+
+  private VM otherVM;
+
+  @Before
+  public void before() throws Exception {
+    this.otherVM = Host.getHost(0).getVM(0);
+  }
+
+  /**
+   * Make sure that value is only deserialized in cache whose application asks for the value.
+   */
+  @Test
+  public void remotePRValuesShouldNotBeDeserialized() throws Exception {
+    PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
+    partitionAttributesFactory.setRedundantCopies(0);
+    partitionAttributesFactory.setLocalMaxMemory(0); // make it an accessor
+
+    AttributesFactory factory = new AttributesFactory();
+    factory.setPartitionAttributes(partitionAttributesFactory.create());
+
+    Region<String, HomeBoy> region = createRootRegion(REGION_NAME, factory.create());
+
+    doCreateOtherVm(this.otherVM);
+
+    region.put("key1", new HomeBoy());
+
+    assertTrue(region.get("key1") instanceof HomeBoy);
+  }
+
+  private void doCreateOtherVm(VM otherVM) {
+    otherVM.invoke(new CacheSerializableRunnable("create root") {
+      public void run2() throws CacheException {
+        getSystem();
+
+        CacheListener listener = new CacheListenerAdapter() {
+          @Override
+          public void afterCreate(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+
+          @Override
+          public void afterUpdate(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+
+          @Override
+          public void afterInvalidate(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+
+          @Override
+          public void afterDestroy(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+        };
+
+        AttributesFactory factory = new AttributesFactory();
+        factory.setCacheListener(listener);
+
+        // create a pr with a data store
+        PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory();
+        partitionAttributesFactory.setRedundantCopies(0);
+
+        // use defaults so this is a data store
+        factory.setPartitionAttributes(partitionAttributesFactory.create());
+        createRootRegion(REGION_NAME, factory.create());
+      }
+    });
+  }
+
+  private static class HomeBoy implements DataSerializable {
+    public HomeBoy() {}
+
+    @Override
+    public void toData(DataOutput out) throws IOException {
+      DistributedMember me = InternalDistributedSystem.getAnyInstance().getDistributedMember();
+      DataSerializer.writeObject(me, out);
+    }
+
+    @Override
+    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+      DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
+      DistributedMember me = ds.getDistributedMember();
+      DistributedMember hb = DataSerializer.readObject(in);
+      if (me.equals(hb)) {
+        ds.getLogWriter().info("HomeBoy was deserialized on his home");
+      } else {
+        String msg = "HomeBoy was deserialized on " + me + " instead of his home " + hb;
+        ds.getLogWriter().error(msg);
+        throw new IllegalStateException(msg);
+      }
+    }
+  }
+}
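
The setup above relies on the accessor/data-store split for partitioned regions: setLocalMaxMemory(0) makes the local member a pure accessor, while the defaults used in the other VM make it a data store that actually hosts the buckets. A condensed sketch of the two configurations, pulled out of the test for reference (illustrative only):

// Accessor: holds no data locally, forwards operations to remote buckets.
PartitionAttributesFactory accessorPaf = new PartitionAttributesFactory();
accessorPaf.setRedundantCopies(0);
accessorPaf.setLocalMaxMemory(0);              // 0 MB of local storage => accessor only

AttributesFactory accessorFactory = new AttributesFactory();
accessorFactory.setPartitionAttributes(accessorPaf.create());

// Data store: default localMaxMemory, so this member hosts the buckets.
PartitionAttributesFactory storePaf = new PartitionAttributesFactory();
storePaf.setRedundantCopies(0);

AttributesFactory storeFactory = new AttributesFactory();
storeFactory.setPartitionAttributes(storePaf.create());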

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/cache30/ValuesAreLazilyDeserializedRegressionTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/cache30/ValuesAreLazilyDeserializedRegressionTest.java b/geode-core/src/test/java/org/apache/geode/cache30/ValuesAreLazilyDeserializedRegressionTest.java
new file mode 100644
index 0000000..6aa0820
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/cache30/ValuesAreLazilyDeserializedRegressionTest.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.cache30;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.geode.DataSerializable;
+import org.apache.geode.DataSerializer;
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.CacheListener;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.Scope;
+import org.apache.geode.cache.util.CacheListenerAdapter;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.DistributedSystem;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.test.dunit.Host;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.junit.categories.DistributedTest;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Test to make sure cache values are lazily deserialized
+ *
+ * #34948: distributed cache values are always getting deserialized
+ *
+ * @since GemFire 5.0
+ */
+@Category(DistributedTest.class)
+public class ValuesAreLazilyDeserializedRegressionTest extends JUnit4CacheTestCase {
+
+  private static final String REGION_NAME = "bug34948";
+
+  // TODO: value of lastCallback is not validated
+  private static Object lastCallback = null;
+
+  private VM otherVM;
+
+  @Before
+  public void before() throws Exception {
+    this.otherVM = Host.getHost(0).getVM(0);
+  }
+
+  /**
+   * Make sure that value is only deserialized in cache whose application asks for the value.
+   */
+  @Test
+  public void valueShouldBeLazilyDeserialized() throws CacheException {
+    AttributesFactory factory = new AttributesFactory();
+    factory.setScope(Scope.DISTRIBUTED_ACK);
+    factory.setDataPolicy(DataPolicy.PRELOADED);
+
+    Region<String, HomeBoy> region = createRootRegion(REGION_NAME, factory.create());
+
+    // before gii
+    region.put("key1", new HomeBoy());
+
+    doCreateOtherVm(this.otherVM);
+
+    // after gii
+    region.put("key2", new HomeBoy());
+
+    region.localDestroy("key1");
+    region.localDestroy("key2");
+
+    Object value = region.get("key1");
+    assertTrue(region.get("key1") instanceof HomeBoy);
+    assertTrue(region.get("key2") == null); // preload will not distribute
+
+    // TODO: add putAll test once it does not deserialize
+  }
+
+  private void doCreateOtherVm(VM otherVM) {
+    otherVM.invoke(new CacheSerializableRunnable("create root") {
+
+      @Override
+      public void run2() throws CacheException {
+        getSystem();
+
+        CacheListener<String, HomeBoy> listener = new CacheListenerAdapter<String, HomeBoy>() {
+          @Override
+          public void afterCreate(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+
+          @Override
+          public void afterUpdate(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+
+          @Override
+          public void afterInvalidate(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+
+          @Override
+          public void afterDestroy(EntryEvent event) {
+            if (event.getCallbackArgument() != null) {
+              lastCallback = event.getCallbackArgument();
+            }
+          }
+        };
+
+        AttributesFactory<String, HomeBoy> factory = new AttributesFactory<>();
+        factory.setScope(Scope.DISTRIBUTED_ACK);
+        factory.setDataPolicy(DataPolicy.PRELOADED);
+        factory.setCacheListener(listener);
+
+        createRootRegion(REGION_NAME, factory.create());
+      }
+    });
+  }
+
+  private static class HomeBoy implements DataSerializable {
+    public HomeBoy() {}
+
+    @Override
+    public void toData(DataOutput out) throws IOException {
+      DistributedMember me = InternalDistributedSystem.getAnyInstance().getDistributedMember();
+      DataSerializer.writeObject(me, out);
+    }
+
+    @Override
+    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
+      DistributedSystem ds = InternalDistributedSystem.getAnyInstance();
+      DistributedMember me = ds.getDistributedMember();
+      DistributedMember hb = DataSerializer.readObject(in);
+      if (me.equals(hb)) {
+        ds.getLogWriter().info("HomeBoy was deserialized on his home");
+      } else {
+        String msg = "HomeBoy was deserialized on " + me + " instead of his home " + hb;
+        ds.getLogWriter().error(msg);
+        throw new IllegalStateException(msg);
+      }
+    }
+  }
+}
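
Both deserialization regression tests use the HomeBoy helper to detect where a value is deserialized: DataSerializable gives the class full control over its wire format, so fromData can record which member performed the read. A minimal DataSerializable sketch for an ordinary value class is shown below; the class and field names are hypothetical and not part of the commit.

// Hypothetical value class illustrating the DataSerializable contract used by HomeBoy.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.geode.DataSerializable;
import org.apache.geode.DataSerializer;

public class ExampleValue implements DataSerializable {
  private String name;   // hypothetical payload fields
  private int count;

  public ExampleValue() {
    // a public no-arg constructor is needed so the value can be re-created on read
  }

  @Override
  public void toData(DataOutput out) throws IOException {
    DataSerializer.writeString(name, out);   // write fields in a fixed order
    out.writeInt(count);
  }

  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    name = DataSerializer.readString(in);    // read them back in the same order
    count = in.readInt();
  }
}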

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
index de63433..b52fe4d 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/ConnectDisconnectDUnitTest.java
@@ -14,105 +14,87 @@
  */
 package org.apache.geode.internal.cache;
 
-import static org.apache.geode.distributed.ConfigurationProperties.*;
-
-import java.util.Properties;
-
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
+import static org.apache.geode.distributed.ConfigurationProperties.CONSERVE_SOCKETS;
+import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
+import static org.assertj.core.api.Assertions.assertThat;
 
+import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.DistributedTestUtils;
 import org.apache.geode.test.dunit.Host;
-import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.SerializableRunnable;
 import org.apache.geode.test.dunit.VM;
 import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;
+import org.apache.geode.test.junit.Repeat;
 import org.apache.geode.test.junit.categories.DistributedTest;
+import org.apache.geode.test.junit.rules.RepeatRule;
+import org.apache.logging.log4j.Logger;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.Properties;
 
-/** A test of 46438 - missing response to an update attributes message */
+/**
+ * A test of 46438 - missing response to an update attributes message
+ *
+ * see bugs #50785 and #46438
+ */
 @Category(DistributedTest.class)
 public class ConnectDisconnectDUnitTest extends JUnit4CacheTestCase {
+  private static final Logger logger = LogService.getLogger();
 
-  private IgnoredException ex;
+  private static int count;
 
-  // see bugs #50785 and #46438
-  @Test
-  public void testManyConnectsAndDisconnects() throws Throwable {
-    // invokeInEveryVM(new SerializableRunnable() {
-    //
-    // @Override
-    // public void run() {
-    // Log.setLogWriterLevel("info");
-    // }
-    // });
-
-    // uncomment these lines to use stand-alone locators
-    // int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(4);
-    // setLocatorPorts(ports);
-
-    for (int i = 0; i < 20; i++) {
-      LogWriterUtils.getLogWriter().info("Test run: " + i);
-      runOnce();
-      tearDown();
-      setUp();
-    }
+  @Rule
+  public RepeatRule repeat = new RepeatRule();
+
+  @BeforeClass
+  public static void beforeClass() {
+    count = 0;
   }
 
+  @Before
+  public void before() {
+    count++;
+  }
 
-  static int LOCATOR_PORT;
-  static String LOCATORS_STRING;
+  @After
+  public void after() {
+    disconnectAllFromDS();
 
-  static int[] locatorPorts;
+  }
 
-  public void setLocatorPorts(int[] ports) {
-    DistributedTestUtils.deleteLocatorStateFile(ports);
-    String locators = "";
-    for (int i = 0; i < ports.length; i++) {
-      if (i > 0) {
-        locators += ",";
-      }
-      locators += "localhost[" + ports[i] + "]";
-    }
-    final String locators_string = locators;
-    for (int i = 0; i < ports.length; i++) {
-      final int port = ports[i];
-      Host.getHost(0).getVM(i).invoke(new SerializableRunnable("set locator port") {
-        public void run() {
-          LOCATOR_PORT = port;
-          LOCATORS_STRING = locators_string;
-        }
-      });
-    }
-    locatorPorts = ports;
+  @AfterClass
+  public static void afterClass() {
+    assertThat(count).isEqualTo(20);
   }
 
   @Override
-  public final void postTearDownCacheTestCase() throws Exception {
-    if (locatorPorts != null) {
-      DistributedTestUtils.deleteLocatorStateFile(locatorPorts);
-    }
+  public Properties getDistributedSystemProperties() {
+    Properties props = super.getDistributedSystemProperties();
+    props.setProperty(LOG_LEVEL, "info");
+    props.setProperty(CONSERVE_SOCKETS, "false");
+    return props;
   }
 
   /**
    * This test creates 4 vms and starts a cache in each VM. If that doesn't hang, it destroys the DS
    * in all vms and recreates the cache.
-   * 
-   * @throws Throwable
    */
-  public void runOnce() throws Throwable {
+  @Test
+  @Repeat(20)
+  public void testManyConnectsAndDisconnects() throws Exception {
+    logger.info("Test run: {}", count);
 
     int numVMs = 4;
-
     VM[] vms = new VM[numVMs];
 
     for (int i = 0; i < numVMs; i++) {
-      // if(i == 0) {
-      // vms[i] = Host.getHost(0).getVM(4);
-      // } else {
       vms[i] = Host.getHost(0).getVM(i);
-      // }
     }
 
     AsyncInvocation[] asyncs = new AsyncInvocation[numVMs];
@@ -120,44 +102,14 @@ public class ConnectDisconnectDUnitTest extends JUnit4CacheTestCase {
       asyncs[i] = vms[i].invokeAsync(new SerializableRunnable("Create a cache") {
         @Override
         public void run() {
-          // try {
-          // JGroupMembershipManager.setDebugJGroups(true);
           getCache();
-          // } finally {
-          // JGroupMembershipManager.setDebugJGroups(false);
-          // }
         }
       });
     }
 
-
     for (int i = 0; i < numVMs; i++) {
-      asyncs[i].getResult();
-      // try {
-      // asyncs[i].getResult(30 * 1000);
-      // } catch(TimeoutException e) {
-      // getLogWriter().severe("DAN DEBUG - we have a hang");
-      // dumpAllStacks();
-      // fail("DAN - WE HIT THE ISSUE",e);
-      // throw e;
-      // }
-    }
-
-    disconnectAllFromDS();
-  }
-
-
-  @Override
-  public Properties getDistributedSystemProperties() {
-    Properties props = super.getDistributedSystemProperties();
-    props.setProperty(LOG_LEVEL, "info");
-    props.setProperty(CONSERVE_SOCKETS, "false");
-    if (LOCATOR_PORT > 0) {
-      props.setProperty(START_LOCATOR, "localhost[" + LOCATOR_PORT + "]");
-      props.setProperty(LOCATORS, LOCATORS_STRING);
+      asyncs[i].await();
     }
-    return props;
   }
 
-
 }
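
The rewrite above drops the hand-rolled 20-iteration loop (runOnce/tearDown/setUp) in favor of the Repeat annotation and RepeatRule from the Geode test rules, with the @BeforeClass/@AfterClass counters verifying that all 20 runs actually happened. Stripped of the Geode specifics, the repeat pattern looks roughly like this sketch; the class and method names here are hypothetical.

// Hypothetical test class showing the Repeat/RepeatRule pattern adopted above.
import org.apache.geode.test.junit.Repeat;
import org.apache.geode.test.junit.rules.RepeatRule;
import org.junit.Rule;
import org.junit.Test;

public class RepeatedExampleTest {

  @Rule
  public RepeatRule repeat = new RepeatRule();   // drives the repeated invocations

  @Test
  @Repeat(20)                                    // run this test method 20 times
  public void connectAndDisconnect() {
    // connect to the distributed system and disconnect again, as the real test does
  }
}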

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
index 4175e81..b372696 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/DistributedTestFixture.java
@@ -28,7 +28,7 @@ public interface DistributedTestFixture extends Serializable {
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void preSetUp() throws Exception;
+  void preSetUp() throws Exception;
 
   /**
    * {@code postSetUp()} is invoked after {@code DistributedTestCase#setUp()}.
@@ -36,7 +36,7 @@ public interface DistributedTestFixture extends Serializable {
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void postSetUp() throws Exception;
+  void postSetUp() throws Exception;
 
   /**
    * {@code preTearDown()} is invoked before {@code DistributedTestCase#tearDown()}.
@@ -44,7 +44,7 @@ public interface DistributedTestFixture extends Serializable {
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void preTearDown() throws Exception;
+  void preTearDown() throws Exception;
 
   /**
    * {@code postTearDown()} is invoked after {@code DistributedTestCase#tearDown()}.
@@ -52,7 +52,7 @@ public interface DistributedTestFixture extends Serializable {
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void postTearDown() throws Exception;
+  void postTearDown() throws Exception;
 
   /**
    * {@code preTearDownAssertions()} is invoked before any tear down methods have been invoked. If
@@ -61,7 +61,7 @@ public interface DistributedTestFixture extends Serializable {
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void preTearDownAssertions() throws Exception;
+  void preTearDownAssertions() throws Exception;
 
   /**
    * {@code postTearDownAssertions()} is invoked after all tear down methods have completed. This
@@ -70,7 +70,7 @@ public interface DistributedTestFixture extends Serializable {
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void postTearDownAssertions() throws Exception;
+  void postTearDownAssertions() throws Exception;
 
   /**
    * Returns the {@code Properties} used to define the {@code DistributedSystem}.
@@ -79,11 +79,11 @@ public interface DistributedTestFixture extends Serializable {
    * Override this as needed. This method is called by various {@code getSystem} methods in
    * {@code DistributedTestCase}.
    */
-  public Properties getDistributedSystemProperties();
+  Properties getDistributedSystemProperties();
 
   /**
    * Returns the {@code name} of the test method being executed.
    */
-  public String getName();
+  String getName();
 
 }
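
DistributedTestFixture only declares the lifecycle hooks; concrete tests inherit empty defaults from the base classes and override the hooks they need. A sketch of a typical override, modeled on ConnectDisconnectDUnitTest above (the class name and the extra cleanup are hypothetical):

// Hypothetical subclass showing how the fixture hooks are usually overridden.
import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;

import java.util.Properties;
import org.apache.geode.test.dunit.cache.internal.JUnit4CacheTestCase;

public class ExampleDistributedTest extends JUnit4CacheTestCase {

  @Override
  public void postSetUp() throws Exception {
    // per-test initialization, runs after the framework's setUp()
  }

  @Override
  public void preTearDown() throws Exception {
    // cleanup that must run before the framework tears the test down
  }

  @Override
  public Properties getDistributedSystemProperties() {
    Properties props = super.getDistributedSystemProperties();
    props.setProperty(LOG_LEVEL, "info");   // same override ConnectDisconnectDUnitTest makes
    return props;
  }
}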

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit3DistributedTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit3DistributedTestCase.java b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit3DistributedTestCase.java
index abdac89..fc0f2f6 100755
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit3DistributedTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit3DistributedTestCase.java
@@ -18,13 +18,10 @@ import java.io.Serializable;
 import java.util.Properties;
 
 import junit.framework.TestCase;
-import org.apache.logging.log4j.Logger;
 import org.junit.experimental.categories.Category;
 
-import org.apache.geode.cache.Cache;
 import org.apache.geode.distributed.DistributedSystem;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.test.junit.categories.DistributedTest;
 
 /**
@@ -34,8 +31,6 @@ import org.apache.geode.test.junit.categories.DistributedTest;
 public abstract class JUnit3DistributedTestCase extends TestCase
     implements DistributedTestFixture, Serializable {
 
-  private static final Logger logger = LogService.getLogger();
-
   private final JUnit4DistributedTestCase delegate = new JUnit4DistributedTestCase(this) {};
 
   /**
@@ -47,19 +42,12 @@ public abstract class JUnit3DistributedTestCase extends TestCase
     JUnit4DistributedTestCase.initializeDistributedTestCase();
   }
 
-  // ---------------------------------------------------------------------------
-  // methods for tests
-  // ---------------------------------------------------------------------------
-
   /**
    * @deprecated Please override {@link #getDistributedSystemProperties()} instead.
    */
   @Deprecated
-  public final void setSystem(final Properties props, final DistributedSystem ds) { // TODO:
-                                                                                    // override
-                                                                                    // getDistributedSystemProperties
-                                                                                    // and then
-                                                                                    // delete
+  public final void setSystem(final Properties props, final DistributedSystem ds) {
+    // TODO: override getDistributedSystemProperties and then delete
     delegate.setSystem(props, ds);
   }
 
@@ -100,10 +88,6 @@ public abstract class JUnit3DistributedTestCase extends TestCase
     return delegate.basicGetSystem();
   }
 
-  public final void nullSystem() { // TODO: delete
-    delegate.nullSystem();
-  }
-
   public static final InternalDistributedSystem getSystemStatic() {
     return JUnit4DistributedTestCase.getSystemStatic();
   }
@@ -146,10 +130,6 @@ public abstract class JUnit3DistributedTestCase extends TestCase
     JUnit4DistributedTestCase.disconnectFromDS();
   }
 
-  // ---------------------------------------------------------------------------
-  // name methods
-  // ---------------------------------------------------------------------------
-
   public static final String getTestMethodName() {
     return JUnit4DistributedTestCase.getTestMethodName();
   }
@@ -162,10 +142,6 @@ public abstract class JUnit3DistributedTestCase extends TestCase
     return delegate.getUniqueName();
   }
 
-  // ---------------------------------------------------------------------------
-  // setup methods
-  // ---------------------------------------------------------------------------
-
   /**
    * Sets up the DistributedTestCase.
    * <p>
@@ -174,7 +150,7 @@ public abstract class JUnit3DistributedTestCase extends TestCase
    */
   @Override
   public final void setUp() throws Exception {
-    delegate.setUp();
+    delegate.setUpJUnit4DistributedTestCase();
   }
 
   /**
@@ -184,7 +160,9 @@ public abstract class JUnit3DistributedTestCase extends TestCase
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void preSetUp() throws Exception {}
+  public void preSetUp() throws Exception {
+    // nothing by default
+  }
 
   /**
    * {@code postSetUp()} is invoked after
@@ -193,11 +171,9 @@ public abstract class JUnit3DistributedTestCase extends TestCase
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void postSetUp() throws Exception {}
-
-  // ---------------------------------------------------------------------------
-  // teardown methods
-  // ---------------------------------------------------------------------------
+  public void postSetUp() throws Exception {
+    // nothing by default
+  }
 
   /**
    * Tears down the DistributedTestCase.
@@ -219,7 +195,9 @@ public abstract class JUnit3DistributedTestCase extends TestCase
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void preTearDown() throws Exception {}
+  public void preTearDown() throws Exception {
+    // nothing by default
+  }
 
   /**
    * {@code postTearDown()} is invoked after
@@ -228,7 +206,9 @@ public abstract class JUnit3DistributedTestCase extends TestCase
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void postTearDown() throws Exception {}
+  public void postTearDown() throws Exception {
+    // nothing by default
+  }
 
   /**
    * {@code preTearDownAssertions()} is invoked before any tear down methods have been invoked. If
@@ -237,7 +217,9 @@ public abstract class JUnit3DistributedTestCase extends TestCase
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void preTearDownAssertions() throws Exception {}
+  public void preTearDownAssertions() throws Exception {
+    // nothing by default
+  }
 
   /**
    * {@code postTearDownAssertions()} is invoked after all tear down methods have completed. This
@@ -246,10 +228,8 @@ public abstract class JUnit3DistributedTestCase extends TestCase
    * <p>
    * Override this as needed. Default implementation is empty.
    */
-  public void postTearDownAssertions() throws Exception {}
-
-  protected static final void destroyRegions(final Cache cache) { // TODO: this should move to
-                                                                  // CacheTestCase
-    JUnit4DistributedTestCase.destroyRegions(cache);
+  public void postTearDownAssertions() throws Exception {
+    // nothing by default
   }
+
 }


[31/32] geode git commit: 1279: rename tests with old bug system numbers

Posted by kl...@apache.org.
http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
index 5a679bb..352b6cf 100644
--- a/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
+++ b/geode-core/src/test/java/org/apache/geode/test/dunit/internal/JUnit4DistributedTestCase.java
@@ -18,6 +18,11 @@ import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
 import static org.apache.geode.distributed.ConfigurationProperties.LOG_FILE;
 import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 import static org.apache.geode.distributed.ConfigurationProperties.STATISTIC_ARCHIVE_FILE;
+import static org.apache.geode.test.dunit.DistributedTestUtils.getAllDistributedSystemProperties;
+import static org.apache.geode.test.dunit.DistributedTestUtils.unregisterInstantiatorsInThisVM;
+import static org.apache.geode.test.dunit.Invoke.invokeInEveryVM;
+import static org.apache.geode.test.dunit.Invoke.invokeInLocator;
+import static org.apache.geode.test.dunit.LogWriterUtils.getLogWriter;
 import static org.junit.Assert.assertNotNull;
 
 import org.apache.geode.admin.internal.AdminDistributedSystemImpl;
@@ -50,11 +55,8 @@ import org.apache.geode.internal.net.SocketCreator;
 import org.apache.geode.internal.net.SocketCreatorFactory;
 import org.apache.geode.management.internal.cli.LogWrapper;
 import org.apache.geode.test.dunit.DUnitBlackboard;
-import org.apache.geode.test.dunit.DistributedTestUtils;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.Invoke;
-import org.apache.geode.test.dunit.LogWriterUtils;
 import org.apache.geode.test.dunit.standalone.DUnitLauncher;
 import org.apache.geode.test.junit.rules.serializable.SerializableTestName;
 import org.apache.logging.log4j.Logger;
@@ -64,10 +66,8 @@ import org.junit.BeforeClass;
 import org.junit.Rule;
 
 import java.io.Serializable;
-import java.text.DecimalFormat;
-import java.util.Iterator;
 import java.util.LinkedHashSet;
-import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 
@@ -75,10 +75,9 @@ import java.util.Set;
  * This class is the base class for all distributed tests using JUnit 4.
  */
 public abstract class JUnit4DistributedTestCase implements DistributedTestFixture, Serializable {
-
   private static final Logger logger = LogService.getLogger();
 
-  private static final Set<String> testHistory = new LinkedHashSet<String>();
+  private static final Set<String> testHistory = new LinkedHashSet<>();
 
   /** This VM's connection to the distributed system */
   private static InternalDistributedSystem system;
@@ -86,10 +85,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
   private static Properties lastSystemProperties;
   private static volatile String testMethodName;
 
-  /** For formatting timing info */
-  private static final DecimalFormat format = new DecimalFormat("###.###");
-
-  private static boolean reconnect = false;
+  private static DUnitBlackboard blackboard;
 
   private static final boolean logPerTest = Boolean.getBoolean("dunitLogPerTest");
 
@@ -116,17 +112,6 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
   @Rule
   public SerializableTestName testNameForDistributedTestCase = new SerializableTestName();
 
-  private static DUnitBlackboard blackboard;
-
-  /**
-   * Returns a DUnitBlackboard that can be used to pass data between VMs and synchronize actions.
-   * 
-   * @return the blackboard
-   */
-  public DUnitBlackboard getBlackboard() {
-    return blackboard;
-  }
-
   @BeforeClass
   public static final void initializeDistributedTestCase() {
     DUnitLauncher.launchIfNeeded();
@@ -147,19 +132,12 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     return this.distributedTestFixture.getClass();
   }
 
-  // ---------------------------------------------------------------------------
-  // methods for tests
-  // ---------------------------------------------------------------------------
-
   /**
    * @deprecated Please override {@link #getDistributedSystemProperties()} instead.
    */
   @Deprecated
-  public final void setSystem(final Properties props, final DistributedSystem ds) { // TODO:
-                                                                                    // override
-                                                                                    // getDistributedSystemProperties
-                                                                                    // and then
-                                                                                    // delete
+  public final void setSystem(final Properties props, final DistributedSystem ds) {
+    // TODO: override getDistributedSystemProperties and then delete
     system = (InternalDistributedSystem) ds;
     lastSystemProperties = props;
     lastSystemCreatedInTest = getTestClass(); // used to be getDeclaringClass()
@@ -183,9 +161,10 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     if (system == null) {
       system = InternalDistributedSystem.getAnyInstance();
     }
+
     if (system == null || !system.isConnected()) {
       // Figure out our distributed system properties
-      Properties p = DistributedTestUtils.getAllDistributedSystemProperties(props);
+      Properties p = getAllDistributedSystemProperties(props);
       lastSystemCreatedInTest = getTestClass(); // used to be getDeclaringClass()
       if (logPerTest) {
         String testMethod = getTestMethodName();
@@ -197,36 +176,37 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
       }
       system = (InternalDistributedSystem) DistributedSystem.connect(p);
       lastSystemProperties = p;
+
     } else {
       boolean needNewSystem = false;
       if (!getTestClass().equals(lastSystemCreatedInTest)) { // used to be getDeclaringClass()
-        Properties newProps = DistributedTestUtils.getAllDistributedSystemProperties(props);
+        Properties newProps = getAllDistributedSystemProperties(props);
         needNewSystem = !newProps.equals(lastSystemProperties);
         if (needNewSystem) {
-          LogWriterUtils.getLogWriter()
+          getLogWriter()
               .info("Test class has changed and the new DS properties are not an exact match. "
                   + "Forcing DS disconnect. Old props = " + lastSystemProperties + "new props="
                   + newProps);
         }
+
       } else {
         Properties activeProps = system.getProperties();
-        for (Iterator iter = props.entrySet().iterator(); iter.hasNext();) {
-          Map.Entry entry = (Map.Entry) iter.next();
+        for (Entry<Object, Object> entry : props.entrySet()) {
           String key = (String) entry.getKey();
           String value = (String) entry.getValue();
           if (!value.equals(activeProps.getProperty(key))) {
             needNewSystem = true;
-            LogWriterUtils.getLogWriter().info("Forcing DS disconnect. For property " + key
-                + " old value = " + activeProps.getProperty(key) + " new value = " + value);
+            getLogWriter().info("Forcing DS disconnect. For property " + key + " old value = "
+                + activeProps.getProperty(key) + " new value = " + value);
             break;
           }
         }
       }
+
       if (needNewSystem) {
         // the current system does not meet our needs to disconnect and
         // call recursively to get a new system.
-        LogWriterUtils.getLogWriter()
-            .info("Disconnecting from current DS in order to make a new one");
+        getLogWriter().info("Disconnecting from current DS in order to make a new one");
         disconnectFromDS();
         getSystem(props);
       }
@@ -305,14 +285,13 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
 
   public static final void disconnectAllFromDS() {
     disconnectFromDS();
-    Invoke.invokeInEveryVM("disconnectFromDS", () -> disconnectFromDS());
+    invokeInEveryVM("disconnectFromDS", () -> disconnectFromDS());
   }
 
   /**
    * Disconnects this VM from the distributed system
    */
   public static final void disconnectFromDS() {
-    // setTestMethodName(null);
     GemFireCacheImpl.testCacheXml = null;
     if (system != null) {
       system.disconnect();
@@ -326,20 +305,24 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
       }
       try {
         ds.disconnect();
-      } catch (Exception e) {
-        // ignore
+      } catch (Exception ignore) {
       }
     }
 
     AdminDistributedSystemImpl ads = AdminDistributedSystemImpl.getConnectedInstance();
-    if (ads != null) {// && ads.isConnected()) {
+    if (ads != null) {
       ads.disconnect();
     }
   }
 
-  // ---------------------------------------------------------------------------
-  // name methods
-  // ---------------------------------------------------------------------------
+  /**
+   * Returns a DUnitBlackboard that can be used to pass data between VMs and synchronize actions.
+   *
+   * @return the blackboard
+   */
+  public DUnitBlackboard getBlackboard() {
+    return blackboard;
+  }
 
   public static final String getTestMethodName() {
     return testMethodName;
@@ -358,10 +341,6 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     return getTestClass().getSimpleName() + "_" + getName();
   }
 
-  // ---------------------------------------------------------------------------
-  // setup methods
-  // ---------------------------------------------------------------------------
-
   /**
    * Sets up the DistributedTestCase.
    *
@@ -370,7 +349,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
    * setUp() or override {@link #postSetUp()} with work that needs to occur after setUp().
    */
   @Before
-  public final void setUp() throws Exception {
+  public final void setUpJUnit4DistributedTestCase() throws Exception {
     preSetUp();
     setUpDistributedTestCase();
     postSetUp();
@@ -455,11 +434,10 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
         .set(new InternalDistributedSystem.CreationStackGenerator() {
           @Override
           public Throwable generateCreationStack(final DistributionConfig config) {
-            final StringBuilder sb = new StringBuilder();
-            final String[] validAttributeNames = config.getAttributeNames();
-            for (int i = 0; i < validAttributeNames.length; i++) {
-              final String attName = validAttributeNames[i];
-              final Object actualAtt = config.getAttributeObject(attName);
+            StringBuilder sb = new StringBuilder();
+            String[] validAttributeNames = config.getAttributeNames();
+            for (String attName : validAttributeNames) {
+              Object actualAtt = config.getAttributeObject(attName);
               String actualAttStr = actualAtt.toString();
               sb.append("  ");
               sb.append(attName);
@@ -487,10 +465,6 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     System.out.println("Previously run tests: " + testHistory);
   }
 
-  // ---------------------------------------------------------------------------
-  // teardown methods
-  // ---------------------------------------------------------------------------
-
   /**
    * Tears down the DistributedTestCase.
    *
@@ -515,8 +489,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
   }
 
   private final void tearDownDistributedTestCase() throws Exception {
-    Invoke.invokeInEveryVM("tearDownCreationStackGenerator",
-        () -> tearDownCreationStackGenerator());
+    invokeInEveryVM("tearDownCreationStackGenerator", () -> tearDownCreationStackGenerator());
     if (logPerTest) {
       disconnectAllFromDS();
     }
@@ -524,7 +497,6 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     if (!getDistributedSystemProperties().isEmpty()) {
       disconnectAllFromDS();
     }
-
   }
 
   /**
@@ -569,10 +541,10 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
 
   private static final void cleanupAllVms() {
     tearDownVM();
-    Invoke.invokeInEveryVM("tearDownVM", () -> tearDownVM());
-    Invoke.invokeInLocator(() -> {
+    invokeInEveryVM("tearDownVM", () -> tearDownVM());
+    invokeInLocator(() -> {
       DistributionMessageObserver.setInstance(null);
-      DistributedTestUtils.unregisterInstantiatorsInThisVM();
+      unregisterInstantiatorsInThisVM();
     });
     DUnitLauncher.closeAndCheckForSuspects();
   }
@@ -580,6 +552,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
   private static final void tearDownVM() {
     closeCache();
     disconnectFromDS();
+
     // keep alphabetized to detect duplicate lines
     CacheCreation.clearThreadLocals();
     CacheServerLauncher.clearStatics();
@@ -588,7 +561,7 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     ClientServerTestCase.AUTO_LOAD_BALANCE = false;
     ClientStatsManager.cleanupForTests();
     DiskStoreObserver.setInstance(null);
-    DistributedTestUtils.unregisterInstantiatorsInThisVM();
+    unregisterInstantiatorsInThisVM();
     DistributionMessageObserver.setInstance(null);
     GlobalLockingDUnitTest.region_testBug32356 = null;
     InitialImageOperation.slowImageProcessing = 0;
@@ -614,7 +587,8 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     SocketCreatorFactory.close();
   }
 
-  private static final void closeCache() { // TODO: this should move to CacheTestCase
+  // TODO: this should move to CacheTestCase
+  private static final void closeCache() {
     GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
     if (cache != null && !cache.isClosed()) {
       destroyRegions(cache);
@@ -622,12 +596,11 @@ public abstract class JUnit4DistributedTestCase implements DistributedTestFixtur
     }
   }
 
-  protected static final void destroyRegions(final Cache cache) { // TODO: this should move to
-                                                                  // CacheTestCase
+  // TODO: this should move to CacheTestCase
+  protected static final void destroyRegions(final Cache cache) {
     if (cache != null && !cache.isClosed()) {
       // try to destroy the root regions first so that we clean up any persistent files.
-      for (Iterator itr = cache.rootRegions().iterator(); itr.hasNext();) {
-        Region root = (Region) itr.next();
+      for (Region<?, ?> root : cache.rootRegions()) {
         String regionFullPath = root == null ? null : root.getFullPath();
         // for colocated regions you can't locally destroy a partitioned region.
         if (root.isDestroyed() || root instanceof HARegion || root instanceof PartitionedRegion) {

http://git-wip-us.apache.org/repos/asf/geode/blob/64404037/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
----------------------------------------------------------------------
diff --git a/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
new file mode 100644
index 0000000..5557f1b
--- /dev/null
+++ b/geode-junit/src/main/java/org/apache/geode/test/junit/rules/serializable/SerializableErrorCollector.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.test.junit.rules.serializable;
+
+import org.junit.rules.ErrorCollector;
+
+import java.io.Serializable;
+
+public class SerializableErrorCollector extends ErrorCollector implements Serializable {
+}
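
A quick usage sketch for the new rule (the test class here is hypothetical): the subclass adds nothing beyond Serializable, which matters because DUnit test instances and their @Rule fields are serialized when work is shipped to other VMs, as with the SerializableTestName rule in the same package.

```java
import static org.hamcrest.CoreMatchers.is;

import org.apache.geode.test.junit.rules.serializable.SerializableErrorCollector;
import org.junit.Rule;
import org.junit.Test;

// Hypothetical usage sketch: the ErrorCollector semantics are unchanged; the
// subclass only makes the rule safe to hold as a field of a serializable test.
public class ExampleErrorCollectorTest {

  @Rule
  public SerializableErrorCollector errorCollector = new SerializableErrorCollector();

  @Test
  public void reportsAllFailedChecksTogether() {
    // Both checks run; any failures are collected and reported when the test ends.
    errorCollector.checkThat("arithmetic", 1 + 1, is(2));
    errorCollector.checkThat("string length", "geode".length(), is(5));
  }
}
```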


[26/32] geode git commit: GEODE-2952 document quoting of exact match Lucene queries

Posted by kl...@apache.org.
GEODE-2952 document quoting of exact match Lucene queries

    This closes #545


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c08d70dd
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c08d70dd
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c08d70dd

Branch: refs/heads/feature/GEODE-1279
Commit: c08d70dd4695dbbad17f86fa4227a13497ab0b08
Parents: fa808ac
Author: Karen Miller <km...@pivotal.io>
Authored: Fri May 26 15:10:43 2017 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Tue May 30 13:57:07 2017 -0700

----------------------------------------------------------------------
 .../gfsh/command-pages/search.html.md.erb            | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/c08d70dd/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
index 6cdf362..1c63ecf 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
@@ -62,7 +62,7 @@ search lucene --name=value --region=value --queryStrings=value --defaultField=va
 </tr>
 <tr>
 <td><span class="keyword parmname" style="whitespace:nowrap;">&#8209;&#8208;queryStrings</span></td>
-<td><em>Required</em>. Query string to search the lucene index.</td>
+<td><em>Required</em>. Query string to search the lucene index. Surround a string with double quote marks to do an exact match of the string.</td>
 <td> </td>
 </tr>
 <tr>
@@ -84,21 +84,28 @@ search lucene --name=value --region=value --queryStrings=value --defaultField=va
 **Example Commands:**
 
 ``` pre
-gfsh> search lucene --name=testIndex --region=/testRegion --queryStrings=value1 --defaultField=__REGION_VALUE_FIELD
+gfsh> search lucene --name=testIndex --region=/testRegion --queryStrings=value1
+      --defaultField=__REGION_VALUE_FIELD
  
+
+gfsh> search lucene --name=indexOfStrings --region=/stringTestRegion 
+      --queryStrings='__REGION_VALUE_FIELD:"my exact string"'
+      --defaultField=__REGION_VALUE_FIELD
 ```
 
 **Sample Output:**
 
 ``` pre
-gfsh>search lucene --name=testIndex --region=/testRegion --queryStrings=value* --defaultField=__REGION_VALUE_FIELD
+gfsh>search lucene --name=testIndex --region=/testRegion --queryStrings=value* 
+     --defaultField=__REGION_VALUE_FIELD
 key | value  | score
 --- | ------ | -----
 3   | value3 | 1
 2   | value2 | 1
 1   | value1 | 1
 
-gfsh>search lucene --region=/Person --name=analyzerIndex --defaultField=address --queryStrings="97763"
+gfsh>search lucene --region=/Person --name=analyzerIndex --defaultField=address 
+     --queryStrings="97763"
  key   |                                                   value                                                   | score
 ------ | --------------------------------------------------------------------------------------------------------- | ---------
 key763 | Person{name='Fred Freeloader', email='ffl@example.com', address='763 Miles Dv, Portland_OR_97763', re.. | 1.6694657
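
The same quoting rule applies when querying from the Java API. A minimal sketch, assuming a member whose cache already has the analyzerIndex index on the Person region from the sample output above (the value type is left as Object for brevity):

```java
import java.util.Collection;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.lucene.LuceneQuery;
import org.apache.geode.cache.lucene.LuceneQueryException;
import org.apache.geode.cache.lucene.LuceneService;
import org.apache.geode.cache.lucene.LuceneServiceProvider;

public class ExactMatchQueryExample {

  // Assumes the index and region shown in the gfsh sample output above.
  public Collection<Object> findExactAddress(Cache cache) throws LuceneQueryException {
    LuceneService luceneService = LuceneServiceProvider.get(cache);
    // The inner double quotes make Lucene treat the terms as a single phrase.
    LuceneQuery<String, Object> query = luceneService.createLuceneQueryFactory()
        .create("analyzerIndex", "Person", "address:\"763 Miles Dv\"", "address");
    return query.findValues();
  }
}
```

Without the inner quotes each term is analyzed and matched on its own, which is the distinction the doc change calls out.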


[04/32] geode git commit: GEODE-2958: Destroying a defined index now removes the RegionListener

Posted by kl...@apache.org.
GEODE-2958: Destroying a defined index now removes the RegionListener


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/662358fd
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/662358fd
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/662358fd

Branch: refs/heads/feature/GEODE-1279
Commit: 662358fdba33ce2ea99cadfe07303361b26ead56
Parents: dff937f
Author: Barry Oglesby <bo...@pivotal.io>
Authored: Tue May 23 17:27:01 2017 -0700
Committer: Barry Oglesby <bo...@pivotal.io>
Committed: Wed May 24 10:06:50 2017 -0700

----------------------------------------------------------------------
 .../lucene/internal/LuceneServiceImpl.java      |  3 +
 .../lucene/LuceneIndexDestroyDUnitTest.java     | 39 +++++++++--
 .../LuceneDestroyIndexFunctionJUnitTest.java    | 72 ++++++++++++--------
 3 files changed, 79 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/662358fd/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
index ebee59e..afbcc40 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
@@ -232,6 +232,9 @@ public class LuceneServiceImpl implements InternalLuceneService {
   }
 
   public void destroyDefinedIndex(String indexName, String regionPath) {
+    if (!regionPath.startsWith("/")) {
+      regionPath = "/" + regionPath;
+    }
     String uniqueIndexName = LuceneServiceImpl.getUniqueIndexName(indexName, regionPath);
     if (definedIndexMap.containsKey(uniqueIndexName)) {
       definedIndexMap.remove(uniqueIndexName);

http://git-wip-us.apache.org/repos/asf/geode/blob/662358fd/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
index a6252c8..b34d998 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/LuceneIndexDestroyDUnitTest.java
@@ -401,7 +401,7 @@ public class LuceneIndexDestroyDUnitTest extends LuceneDUnitTest {
 
     // Recreate index and region
     String newIndexName = INDEX_NAME + "+_1";
-    SerializableRunnableIF createIndexNewName = createIndex(newIndexName, "field1");
+    SerializableRunnableIF createIndexNewName = createIndex(newIndexName, REGION_NAME, "field1");
     dataStore1.invoke(() -> initDataStore(createIndexNewName, regionType));
     dataStore2.invoke(() -> initDataStore(createIndexNewName, regionType));
     accessor.invoke(() -> initAccessor(createIndexNewName, regionType));
@@ -455,7 +455,7 @@ public class LuceneIndexDestroyDUnitTest extends LuceneDUnitTest {
     dataStore1.invoke(() -> destroyDataRegion(true));
 
     // Create new index and region
-    SerializableRunnableIF createNewIndex = createIndex(INDEX_NAME, "field2");
+    SerializableRunnableIF createNewIndex = createIndex(INDEX_NAME, REGION_NAME, "field2");
     dataStore1.invoke(() -> initDataStore(createNewIndex, regionType));
     dataStore2.invoke(() -> initDataStore(createNewIndex, regionType));
     accessor.invoke(() -> initAccessor(createNewIndex, regionType));
@@ -472,14 +472,26 @@ public class LuceneIndexDestroyDUnitTest extends LuceneDUnitTest {
     accessor.invoke(() -> executeQuery(INDEX_NAME, "field2Value", "field2", numPuts));
   }
 
+  @Test
+  @Parameters(method = "getListOfRegionTestTypes")
+  public void verifyCreateDestroyDefinedIndex(RegionTestableType regionType) {
+    String[] regionNames = {REGION_NAME, "/" + REGION_NAME};
+    for (String regionName : regionNames) {
+      dataStore1.invoke(createIndex(INDEX_NAME, regionName, "field1"));
+      dataStore1.invoke(() -> verifyDefinedIndexCreated(INDEX_NAME, regionName));
+      dataStore1.invoke(() -> destroyDefinedIndex(INDEX_NAME, regionName));
+      dataStore1.invoke(() -> verifyDefinedIndexDestroyed(INDEX_NAME, regionName));
+    }
+  }
+
   private SerializableRunnableIF createIndex() {
-    return createIndex(INDEX_NAME, "field1");
+    return createIndex(INDEX_NAME, REGION_NAME, "field1");
   }
 
-  private SerializableRunnableIF createIndex(String indexName, String field) {
+  private SerializableRunnableIF createIndex(String indexName, String regionName, String field) {
     return () -> {
       LuceneService luceneService = LuceneServiceProvider.get(getCache());
-      luceneService.createIndexFactory().setFields(field).create(indexName, REGION_NAME);
+      luceneService.createIndexFactory().setFields(field).create(indexName, regionName);
     };
   }
 
@@ -502,6 +514,18 @@ public class LuceneIndexDestroyDUnitTest extends LuceneDUnitTest {
     assertNotNull(luceneService.getIndex(INDEX2_NAME, REGION_NAME));
   }
 
+  private void verifyDefinedIndexCreated(String indexName, String regionName) {
+    LuceneServiceImpl luceneService = (LuceneServiceImpl) LuceneServiceProvider.get(getCache());
+    assertNotNull(luceneService.getDefinedIndex(indexName, regionName));
+    assertEquals(1, getCache().getRegionListeners().size());
+  }
+
+  private void verifyDefinedIndexDestroyed(String indexName, String regionName) {
+    LuceneServiceImpl luceneService = (LuceneServiceImpl) LuceneServiceProvider.get(getCache());
+    assertNull(luceneService.getDefinedIndex(indexName, regionName));
+    assertEquals(0, getCache().getRegionListeners().size());
+  }
+
   private void waitUntilFlushed(String indexName) throws Exception {
     LuceneService luceneService = LuceneServiceProvider.get(getCache());
     assertTrue(
@@ -598,6 +622,11 @@ public class LuceneIndexDestroyDUnitTest extends LuceneDUnitTest {
     luceneService.destroyIndex(INDEX_NAME, REGION_NAME);
   }
 
+  private void destroyDefinedIndex(String indexName, String regionName) {
+    LuceneServiceImpl luceneService = (LuceneServiceImpl) LuceneServiceProvider.get(getCache());
+    luceneService.destroyDefinedIndex(indexName, regionName);
+  }
+
   private void destroyIndexes() {
     LuceneService luceneService = LuceneServiceProvider.get(getCache());
     luceneService.destroyIndexes(REGION_NAME);

http://git-wip-us.apache.org/repos/asf/geode/blob/662358fd/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneDestroyIndexFunctionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneDestroyIndexFunctionJUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneDestroyIndexFunctionJUnitTest.java
index 1d2d773..ce0344d 100644
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneDestroyIndexFunctionJUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneDestroyIndexFunctionJUnitTest.java
@@ -56,58 +56,66 @@ public class LuceneDestroyIndexFunctionJUnitTest {
   @Test
   @SuppressWarnings("unchecked")
   public void testDestroyIndex() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo("index1", "/region1", false);
+    String indexName = "index1";
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(indexName, regionPath, false);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
     function.execute(this.context);
-    verify(this.service).destroyIndex(eq("index1"), eq("/region1"));
-    verify(function).getXmlEntity(eq("index1"), eq("/region1"));
-    verify(this.service, never()).destroyDefinedIndex(eq("index1"), eq("/region1"));
-    verify(this.service, never()).destroyIndexes(eq("/region1"));
+    verify(this.service).destroyIndex(eq(indexName), eq(regionPath));
+    verify(function).getXmlEntity(eq(indexName), eq(regionPath));
+    verify(this.service, never()).destroyDefinedIndex(eq(indexName), eq(regionPath));
+    verify(this.service, never()).destroyIndexes(eq(regionPath));
     verifyFunctionResult(true);
   }
 
   @Test
   @SuppressWarnings("unchecked")
   public void testDestroyIndexFailure() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo("index1", "/region1", false);
+    String indexName = "index1";
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(indexName, regionPath, false);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
-    doThrow(new IllegalStateException()).when(this.service).destroyIndex(eq("index1"),
-        eq("/region1"));
+    doThrow(new IllegalStateException()).when(this.service).destroyIndex(eq(indexName),
+        eq(regionPath));
     function.execute(this.context);
     verifyFunctionResult(false);
   }
 
   @Test
   public void testDestroyDefinedIndex() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo("index1", "/region1", true);
+    String indexName = "index1";
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(indexName, regionPath, true);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
     function.execute(this.context);
-    verify(this.service).destroyDefinedIndex(eq("index1"), eq("/region1"));
-    verify(this.service, never()).destroyIndex(eq("index1"), eq("/region1"));
-    verify(this.service, never()).destroyIndexes(eq("/region1"));
-    verify(function, never()).getXmlEntity(eq("index1"), eq("/region1"));
+    verify(this.service).destroyDefinedIndex(eq(indexName), eq(regionPath));
+    verify(this.service, never()).destroyIndex(eq(indexName), eq(regionPath));
+    verify(this.service, never()).destroyIndexes(eq(regionPath));
+    verify(function, never()).getXmlEntity(eq(indexName), eq(regionPath));
     verifyFunctionResult(true);
   }
 
   @Test
   @SuppressWarnings("unchecked")
   public void testDestroyDefinedIndexFailure() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo("index1", "/region1", true);
+    String indexName = "index1";
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(indexName, regionPath, true);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
-    doThrow(new IllegalStateException()).when(this.service).destroyDefinedIndex(eq("index1"),
-        eq("/region1"));
+    doThrow(new IllegalStateException()).when(this.service).destroyDefinedIndex(eq(indexName),
+        eq(regionPath));
     function.execute(this.context);
     verifyFunctionResult(false);
   }
@@ -115,28 +123,30 @@ public class LuceneDestroyIndexFunctionJUnitTest {
   @Test
   @SuppressWarnings("unchecked")
   public void testDestroyIndexes() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, "/region1", false);
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, regionPath, false);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
     function.execute(this.context);
-    verify(this.service).destroyIndexes(eq("/region1"));
-    verify(function).getXmlEntity(eq(null), eq("/region1"));
-    verify(this.service, never()).destroyDefinedIndexes(eq("/region1"));
-    verify(this.service, never()).destroyIndex(any(), eq("/region1"));
+    verify(this.service).destroyIndexes(eq(regionPath));
+    verify(function).getXmlEntity(eq(null), eq(regionPath));
+    verify(this.service, never()).destroyDefinedIndexes(eq(regionPath));
+    verify(this.service, never()).destroyIndex(any(), eq(regionPath));
     verifyFunctionResult(true);
   }
 
   @Test
   @SuppressWarnings("unchecked")
   public void testDestroyIndexesFailure() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, "/region1", false);
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, regionPath, false);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
-    doThrow(new IllegalStateException()).when(this.service).destroyIndexes(eq("/region1"));
+    doThrow(new IllegalStateException()).when(this.service).destroyIndexes(eq(regionPath));
     function.execute(this.context);
     verifyFunctionResult(false);
   }
@@ -144,28 +154,30 @@ public class LuceneDestroyIndexFunctionJUnitTest {
   @Test
   @SuppressWarnings("unchecked")
   public void testDestroyDefinedIndexes() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, "/region1", true);
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, regionPath, true);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
     function.execute(this.context);
-    verify(this.service).destroyDefinedIndexes(eq("/region1"));
-    verify(this.service, never()).destroyIndexes(eq("/region1"));
-    verify(this.service, never()).destroyIndex(any(), eq("/region1"));
-    verify(function, never()).getXmlEntity(eq("index1"), eq("/region1"));
+    verify(this.service).destroyDefinedIndexes(eq(regionPath));
+    verify(this.service, never()).destroyIndexes(eq(regionPath));
+    verify(this.service, never()).destroyIndex(any(), eq(regionPath));
+    verify(function, never()).getXmlEntity(eq("index1"), eq(regionPath));
     verifyFunctionResult(true);
   }
 
   @Test
   @SuppressWarnings("unchecked")
   public void testDestroyDefinedIndexesFailure() throws Throwable {
-    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, "/region1", true);
+    String regionPath = "/region1";
+    LuceneDestroyIndexInfo indexInfo = new LuceneDestroyIndexInfo(null, regionPath, true);
     when(this.context.getArguments()).thenReturn(indexInfo);
     LuceneDestroyIndexFunction function = new LuceneDestroyIndexFunction();
     function = spy(function);
     doReturn(this.cache).when(function).getCache();
-    doThrow(new IllegalStateException()).when(this.service).destroyDefinedIndexes(eq("/region1"));
+    doThrow(new IllegalStateException()).when(this.service).destroyDefinedIndexes(eq(regionPath));
     function.execute(this.context);
     verifyFunctionResult(false);
   }
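
For context, a defined index is one declared through the LuceneService API before its region exists. A minimal sketch of the flow the new test exercises, assuming a cache that does not yet contain region1 (the cast to LuceneServiceImpl mirrors the test, since destroyDefinedIndex is internal API):

```java
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.lucene.LuceneServiceProvider;
import org.apache.geode.cache.lucene.internal.LuceneServiceImpl;

public class DefinedIndexExample {

  // Hypothetical sketch: define an index before its region is created, then
  // destroy the definition again. With this fix the lookup succeeds whether
  // the region path is passed as "region1" or "/region1".
  public void defineAndDestroy(Cache cache) {
    LuceneServiceImpl luceneService = (LuceneServiceImpl) LuceneServiceProvider.get(cache);
    luceneService.createIndexFactory().setFields("field1").create("index1", "region1");
    luceneService.destroyDefinedIndex("index1", "/region1"); // "region1" also works now
  }
}
```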


[20/32] geode git commit: GEODE-2951 Remove --pageSize from docs of gfsh search lucene

Posted by kl...@apache.org.
GEODE-2951 Remove --pageSize from docs of gfsh search lucene


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/d3543d22
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/d3543d22
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/d3543d22

Branch: refs/heads/feature/GEODE-1279
Commit: d3543d229de46a3a582881484bccf3b77d8ac505
Parents: 6e56a73
Author: Karen Miller <km...@pivotal.io>
Authored: Fri May 26 10:54:58 2017 -0700
Committer: Karen Miller <km...@pivotal.io>
Committed: Tue May 30 09:27:21 2017 -0700

----------------------------------------------------------------------
 .../tools_modules/gfsh/command-pages/search.html.md.erb       | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/d3543d22/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
index 7f239e9..6cdf362 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/search.html.md.erb
@@ -31,7 +31,7 @@ See also [create lucene index](create.html#create_lucene_index), [describe lucen
 
 ``` pre
 search lucene --name=value --region=value --queryStrings=value --defaultField=value
-    [--limit=value] [--pageSize=value] [--keys-only=value]
+    [--limit=value] [--keys-only=value]
 ```
 
 **Parameters, search lucene:**
@@ -75,11 +75,6 @@ search lucene --name=value --region=value --queryStrings=value --defaultField=va
 <td>Number of search results needed.</td>
 <td>If the parameter is not specified: -1</td>
 </tr>
-<tr>
-<td><span class="keyword parmname">\-\-pageSize</span></td>
-<td>Number of results to be returned in a page.</td>
-<td>If the parameter is not specified: -1</td>
-</tr>
 <td><span class="keyword parmname">\-\-keys-only</span></td>
 <td>Return only keys of search results.</td>
 <td>If the parameter is not specified: false</td>


[10/32] geode git commit: GEODE-2950: Adding validation checks on create lucene index parameter names

Posted by kl...@apache.org.
GEODE-2950: Adding validation checks on create lucene index parameter names

	This closes #532


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/c793f74c
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/c793f74c
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/c793f74c

Branch: refs/heads/feature/GEODE-1279
Commit: c793f74c07c3488ba188ed927144be688bd50b19
Parents: 0dae918
Author: David Anuta <da...@gmail.com>
Authored: Wed May 24 16:21:33 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 11:20:56 2017 -0700

----------------------------------------------------------------------
 .../lucene/internal/LuceneServiceImpl.java      | 15 ++++++++----
 .../functions/LuceneCreateIndexFunction.java    |  6 +++--
 .../cli/LuceneIndexCommandsDUnitTest.java       | 25 ++++++++++++++++----
 3 files changed, 36 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/c793f74c/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
index 3859804..c0d6266 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java
@@ -128,7 +128,7 @@ public class LuceneServiceImpl implements InternalLuceneService {
     return getUniqueIndexName(indexName, regionPath) + regionSuffix;
   }
 
-  public static void validateRegionName(String name) {
+  public static void validateCreateIndexCommandParams(String name, boolean isRegionPath) {
     if (name == null) {
       throw new IllegalArgumentException(
           LocalizedStrings.LocalRegion_NAME_CANNOT_BE_NULL.toLocalizedString());
@@ -140,15 +140,22 @@ public class LuceneServiceImpl implements InternalLuceneService {
 
     if (name.startsWith("__")) {
       throw new IllegalArgumentException(
-          "Region names may not begin with a double-underscore: " + name);
+          "Parameter names may not begin with a double-underscore: " + name);
+    }
+
+    final Pattern NAME_PATTERN;
+    if (isRegionPath) {
+      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");
+    } else {
+      NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
     }
 
-    final Pattern NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");
     // Ensure the region only contains valid characters
     Matcher matcher = NAME_PATTERN.matcher(name);
     if (!matcher.matches()) {
       throw new IllegalArgumentException(
-          "Region names may only be alphanumeric and may contain hyphens or underscores: " + name);
+          "Parameter names may only be alphanumeric, though they can contain hyphens or underscores: "
+              + name);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/geode/blob/c793f74c/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
index 422b1ef..26ac0e2 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
@@ -15,7 +15,7 @@
 
 package org.apache.geode.cache.lucene.internal.cli.functions;
 
-import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateRegionName;
+import static org.apache.geode.cache.lucene.internal.LuceneServiceImpl.validateCreateIndexCommandParams;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.geode.cache.Cache;
@@ -67,6 +67,8 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
       memberId = cache.getDistributedSystem().getDistributedMember().getId();
       LuceneService service = LuceneServiceProvider.get(cache);
 
+      validateCreateIndexCommandParams(indexInfo.getIndexName(), false);
+
       String[] fields = indexInfo.getSearchableFieldNames();
       String[] analyzerName = indexInfo.getFieldAnalyzers();
 
@@ -84,7 +86,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
         }
       }
 
-      validateRegionName(indexInfo.getRegionPath());
+      validateCreateIndexCommandParams(indexInfo.getRegionPath(), true);
       indexFactory.create(indexInfo.getIndexName(), indexInfo.getRegionPath());
 
       // TODO - update cluster configuration by returning a valid XmlEntity

http://git-wip-us.apache.org/repos/asf/geode/blob/c793f74c/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
index 04359a3..5e9c4f9 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
@@ -198,7 +198,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Test
-  public void createIndexShouldNotAcceptEmptyRegionNames() {
+  public void createIndexShouldNotAcceptBadIndexOrRegionNames() {
     final VM vm1 = Host.getHost(0).getVM(-1);
     vm1.invoke(() -> {
       getCache();
@@ -210,7 +210,7 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     String resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString.contains("Region names may not begin with a double-underscore:"));
+    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
 
     csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
     csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
@@ -218,8 +218,25 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
 
     resultAsString = executeCommandAndLogResult(csb);
-    assertTrue(resultAsString
-        .contains("Region names may only be alphanumeric and may contain hyphens or underscores:"));
+    assertTrue(resultAsString.contains(
+        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
+
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\'__\'");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+
+    resultAsString = executeCommandAndLogResult(csb);
+    assertTrue(resultAsString.contains("Parameter names may not begin with a double-underscore:"));
+
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "\' @@@*%\'");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+
+    resultAsString = executeCommandAndLogResult(csb);
+    assertTrue(resultAsString.contains(
+        "Parameter names may only be alphanumeric, though they can contain hyphens or underscores:"));
   }
 
   @Test
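
A simplified sketch of the rule the new validation enforces. The class below is illustrative, not the production code; the two patterns are copied from the committed validateCreateIndexCommandParams, and the intent is alphanumerics plus hyphen, underscore and dot, with '/' additionally allowed in region paths and a leading double underscore rejected:

```java
import java.util.regex.Pattern;

// Hypothetical helper showing the shape of the check; the production method
// throws IllegalArgumentException with the messages asserted in the test above.
public class LuceneNameCheck {

  private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("[aA-zZ0-9-_.]+");
  private static final Pattern REGION_PATH_PATTERN = Pattern.compile("[aA-zZ0-9-_./]+");

  public static boolean isValidIndexName(String name) {
    return name != null && !name.isEmpty() && !name.startsWith("__")
        && INDEX_NAME_PATTERN.matcher(name).matches();
  }

  public static boolean isValidRegionPath(String path) {
    return path != null && !path.isEmpty() && !path.startsWith("__")
        && REGION_PATH_PATTERN.matcher(path).matches();
  }
}
```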


[19/32] geode git commit: GEODE-2941 Pulse doc update - add logging config

Posted by kl...@apache.org.
GEODE-2941 Pulse doc update - add logging config


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/6e56a732
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/6e56a732
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/6e56a732

Branch: refs/heads/feature/GEODE-1279
Commit: 6e56a7325336f5c9579a2ac90b7a74e760f52357
Parents: 56f976c
Author: Dave Barnes <db...@pivotal.io>
Authored: Fri May 26 10:46:58 2017 -0700
Committer: Dave Barnes <db...@pivotal.io>
Committed: Fri May 26 14:51:36 2017 -0700

----------------------------------------------------------------------
 .../tools_modules/pulse/pulse-auth.html.md.erb  | 11 -----
 .../pulse/pulse-hosted.html.md.erb              | 44 +++++++++++++++-----
 2 files changed, 33 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/6e56a732/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
index d834592..1d791e0 100644
--- a/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
+++ b/geode-docs/tools_modules/pulse/pulse-auth.html.md.erb
@@ -50,14 +50,3 @@ When the `http` SSL component is enabled, all HTTP services become
 SSL-enabled and you must configure your client applications
 accordingly. For SSL-enabled Pulse, you will need to configure your
 browsers with proper certificates.
-
-If a JMX manager or locator is configured to use SSL, you can configure Pulse to connect to these
-processes. Create a file named `pulsesecurity.properties` and save it somewhere in the classpath of
-your Web application server. Include standard Java SSL properties, such as:
-
-```
-javax.net.ssl.keyStore={KeyStorePath}
-javax.net.ssl.keyStorePassword={KeyStorePassword}
-javax.net.ssl.trustStore={TrustStorePath}
-javax.net.ssl.trustStorePassword={TrustStorePassword}
-```

http://git-wip-us.apache.org/repos/asf/geode/blob/6e56a732/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
----------------------------------------------------------------------
diff --git a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
index ceed530..af9b1f5 100644
--- a/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
+++ b/geode-docs/tools_modules/pulse/pulse-hosted.html.md.erb
@@ -24,9 +24,15 @@ Host Pulse on a dedicated Web application server to make the Pulse application a
 To host Pulse on a Web application server:
 
 1.  Set the `http-service-port` property to zero (`-Dgemfire.http-service-port=0`) when you start your Geode JMX Manager nodes. Setting this property to zero disables the embedded Web server for hosting the Pulse application.
-2.  Create a `pulse.properties` file somewhere in the classpath of your Web application server. For example, if you are hosting Pulse on Tomcat, create the `pulse.properties` file in the `$TOMCAT_SERVER/lib` directory.
 
-3.  Define the following configuration properties in the `pulse.properties` file:
+2.  Deploy the Pulse Web application to your application server. Geode installs the
+`geode-pulse-n.n.n.war` file (where `n.n.n` is a version number) in the `tools/Pulse` subdirectory
+of your Geode installation directory. Depending on your application server, you may need to copy the
+`pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
+
+3.  Stop the Web application server and locate the Pulse configuration in the `WEB-INF/classes` subdirectory.
+
+4.  Edit `pulse.properties`, defining or redefining any of the following configuration properties as needed for your application:
 
     <table>
     <colgroup>
@@ -58,32 +64,48 @@ To host Pulse on a Web application server:
     </tbody>
     </table>
 
-    For example, with this configuration Pulse connects to the locator at mylocator\[10334\] and accesses any available JMX Manager:
+    &nbsp;
+
+    For example, with the default `pulse.properties` configuration, Pulse connects to the locator at port 10334 and accesses any available JMX Manager:
 
     ``` pre
     pulse.useLocator=true
-    pulse.host=locsrv.gemstone.com
+    pulse.host=localhost
     pulse.port=10334
     ```
 
-    With this configuration Pulse accesses only the JMX Manager instance at manager1\[8080\]:
+    With this modified configuration, Pulse accesses only the JMX Manager instance at port 8080:
 
     ``` pre
     pulse.useLocator=false
-    pulse.host=jmxsrv.gemstone.com
+    pulse.host=jmxsrv.mycluster.com
     pulse.port=8080
     ```
 
-4.  (Optional.) Configure authentication for the Pulse Web application using the instructions in [Configuring Pulse Authentication](pulse-auth.html).
+5.  If a JMX manager or locator is configured to use SSL, you can configure Pulse to connect to these
+    processes. Edit `pulsesecurity.properties` to un-comment the standard Java SSL properties:
+
+    ```
+    javax.net.ssl.keyStore={KeyStorePath}
+    javax.net.ssl.keyStorePassword={KeyStorePassword}
+    javax.net.ssl.trustStore={TrustStorePath}
+    javax.net.ssl.trustStorePassword={TrustStorePassword}
+    ```
+
+    Substitute the appropriate paths and passwords for the bracketed placeholders.
+
+7.  Restart the Web application server.
 
-5.  Deploy the Pulse Web application to your application server. Geode installs the `pulse.war` file in the `tools/Pulse` subdirectory of your Geode installation directory. Depending on your application server, you may need to copy the `pulse.war` file to a deployment directory or use a configuration tool to deploy the file.
-6.  Access the Pulse application using the address, port, and application URL that you configure in your Web application server. For example, with Tomcat the default URL is http://*address*:8080/pulse. Your application server provides options for configuring the address, port, and application name; substitute the correct items to access the deployed Pulse application.
+8.  Access the Pulse application using the address, port, and application URL that you configured in
+your Web application server. For example, with Tomcat the default URL is
+`http://*address*:8080/pulse`. Your application server provides options for configuring the address,
+port, and application name; substitute the correct items to access the deployed Pulse application.
 
     Pulse connects to the locator or JMX Manager that you configured in the `pulse.properties` file, authenticating using the credentials that you configured in the file.
 
-7.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
+9.  If you have configured authentication for the Pulse application, enter the username and password of a valid Pulse account in the login screen. Otherwise, enter the default "admin" in both fields. Click **Sign In** to continue.
 
     See [Configuring Pulse Authentication](pulse-auth.html).
 
-8.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](pulse-views.html).
+10.  After you log in, Pulse displays the main cluster view for the distributed system to which it has connected. See [Using Pulse Views](pulse-views.html).
 


[17/32] geode git commit: GEODE-2957: Lucene create index DEFAULT keyword added for standardAnalyzer

Posted by kl...@apache.org.
GEODE-2957: Lucene create index DEFAULT keyword added for standardAnalyzer

	This closes #537


Project: http://git-wip-us.apache.org/repos/asf/geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/geode/commit/29ea88a2
Tree: http://git-wip-us.apache.org/repos/asf/geode/tree/29ea88a2
Diff: http://git-wip-us.apache.org/repos/asf/geode/diff/29ea88a2

Branch: refs/heads/feature/GEODE-1279
Commit: 29ea88a23ef0feb29e8d7684c4061ac54dc66874
Parents: 5ab4a69
Author: David Anuta <da...@gmail.com>
Authored: Thu May 25 13:45:23 2017 -0700
Committer: nabarun <nn...@pivotal.io>
Committed: Thu May 25 16:54:25 2017 -0700

----------------------------------------------------------------------
 .../functions/LuceneCreateIndexFunction.java    |  2 +-
 .../cli/LuceneIndexCommandsDUnitTest.java       | 68 +++++++++++++++++---
 2 files changed, 59 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/geode/blob/29ea88a2/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
index d49f7f9..5e36efa 100644
--- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
+++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/cli/functions/LuceneCreateIndexFunction.java
@@ -105,7 +105,7 @@ public class LuceneCreateIndexFunction extends FunctionAdapter implements Intern
       className = StandardAnalyzer.class.getCanonicalName();
     else {
       String trimmedClassName = StringUtils.trim(className);
-      if (trimmedClassName.equals("") || trimmedClassName.equals("null"))
+      if (trimmedClassName.equals("") || trimmedClassName.equals("DEFAULT"))
         className = StandardAnalyzer.class.getCanonicalName();
       else
         className = trimmedClassName;

http://git-wip-us.apache.org/repos/asf/geode/blob/29ea88a2/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
index 5cbe31c..009c74c 100755
--- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
+++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/cli/LuceneIndexCommandsDUnitTest.java
@@ -298,33 +298,81 @@ public class LuceneIndexCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Test
-  public void createIndexWithNullAnalyzerShouldUseStandardAnalyzer() throws Exception {
-    final VM vm1 = Host.getHost(0).getVM(1);
+  public void createIndexWithWhitespaceOrDefaultKeywordAnalyzerShouldUseStandardAnalyzer()
+      throws Exception {
+    final VM vm1 = Host.getHost(0).getVM(-1);
     vm1.invoke(() -> {
       getCache();
     });
 
-    String analyzerList = StandardAnalyzer.class.getCanonicalName() + ",null,"
+    // Test whitespace analyzer name
+    String analyzerList = StandardAnalyzer.class.getCanonicalName() + ",     ,"
         + KeywordAnalyzer.class.getCanonicalName();
     CommandStringBuilder csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
-    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, INDEX_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "space");
     csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
     csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
-    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, analyzerList);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, "'" + analyzerList + "'");
 
     String resultAsString = executeCommandAndLogResult(csb);
 
+    // Test empty analyzer name
+    analyzerList =
+        StandardAnalyzer.class.getCanonicalName() + ",," + KeywordAnalyzer.class.getCanonicalName();
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "empty");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, analyzerList);
+
+    resultAsString = executeCommandAndLogResult(csb);
+
+    // Test keyword analyzer name
+    analyzerList = StandardAnalyzer.class.getCanonicalName() + ",DEFAULT,"
+        + KeywordAnalyzer.class.getCanonicalName();
+    csb = new CommandStringBuilder(LuceneCliStrings.LUCENE_CREATE_INDEX);
+    csb.addOption(LuceneCliStrings.LUCENE__INDEX_NAME, "keyword");
+    csb.addOption(LuceneCliStrings.LUCENE__REGION_PATH, REGION_NAME);
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__FIELD, "field1,field2,field3");
+    csb.addOption(LuceneCliStrings.LUCENE_CREATE_INDEX__ANALYZER, analyzerList);
+
+    resultAsString = executeCommandAndLogResult(csb);
+
     vm1.invoke(() -> {
       LuceneService luceneService = LuceneServiceProvider.get(getCache());
       createRegion();
-      final LuceneIndex index = luceneService.getIndex(INDEX_NAME, REGION_NAME);
-      final Map<String, Analyzer> fieldAnalyzers = index.getFieldAnalyzers();
+      final LuceneIndex spaceIndex = luceneService.getIndex("space", REGION_NAME);
+      final Map<String, Analyzer> spaceFieldAnalyzers = spaceIndex.getFieldAnalyzers();
+
+      final LuceneIndex emptyIndex = luceneService.getIndex("empty", REGION_NAME);
+      final Map<String, Analyzer> emptyFieldAnalyzers2 = emptyIndex.getFieldAnalyzers();
+
+      final LuceneIndex keywordIndex = luceneService.getIndex("keyword", REGION_NAME);
+      final Map<String, Analyzer> keywordFieldAnalyzers = keywordIndex.getFieldAnalyzers();
+
+      // Test whitespace analyzers
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          spaceFieldAnalyzers.get("field1").getClass().getCanonicalName());
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          spaceFieldAnalyzers.get("field2").getClass().getCanonicalName());
+      assertEquals(KeywordAnalyzer.class.getCanonicalName(),
+          spaceFieldAnalyzers.get("field3").getClass().getCanonicalName());
+
+      // Test empty analyzers
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          emptyFieldAnalyzers2.get("field1").getClass().getCanonicalName());
+      assertEquals(StandardAnalyzer.class.getCanonicalName(),
+          emptyFieldAnalyzers2.get("field2").getClass().getCanonicalName());
+      assertEquals(KeywordAnalyzer.class.getCanonicalName(),
+          emptyFieldAnalyzers2.get("field3").getClass().getCanonicalName());
+
+      // Test keyword analyzers
       assertEquals(StandardAnalyzer.class.getCanonicalName(),
-          fieldAnalyzers.get("field1").getClass().getCanonicalName());
+          keywordFieldAnalyzers.get("field1").getClass().getCanonicalName());
       assertEquals(StandardAnalyzer.class.getCanonicalName(),
-          fieldAnalyzers.get("field2").getClass().getCanonicalName());
+          keywordFieldAnalyzers.get("field2").getClass().getCanonicalName());
       assertEquals(KeywordAnalyzer.class.getCanonicalName(),
-          fieldAnalyzers.get("field3").getClass().getCanonicalName());
+          keywordFieldAnalyzers.get("field3").getClass().getCanonicalName());
     });
   }