Posted to commits@geode.apache.org by kl...@apache.org on 2017/08/13 22:46:09 UTC

[17/19] geode git commit: GEODE-3436: revert recent refactoring of GFSH commands

http://git-wip-us.apache.org/repos/asf/geode/blob/645a32d0/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactDiskStoreCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactDiskStoreCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactDiskStoreCommand.java
deleted file mode 100644
index d20bfb2..0000000
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactDiskStoreCommand.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.management.internal.cli.commands;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import org.apache.geode.cache.persistence.PersistentID;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.InternalDistributedSystem;
-import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
-import org.apache.geode.internal.cache.InternalCache;
-import org.apache.geode.management.DistributedSystemMXBean;
-import org.apache.geode.management.ManagementService;
-import org.apache.geode.management.cli.CliMetaData;
-import org.apache.geode.management.cli.ConverterHint;
-import org.apache.geode.management.cli.Result;
-import org.apache.geode.management.internal.cli.CliUtil;
-import org.apache.geode.management.internal.cli.LogWrapper;
-import org.apache.geode.management.internal.cli.i18n.CliStrings;
-import org.apache.geode.management.internal.cli.result.CompositeResultData;
-import org.apache.geode.management.internal.cli.result.ResultBuilder;
-import org.apache.geode.management.internal.messages.CompactRequest;
-import org.apache.geode.management.internal.security.ResourceOperation;
-import org.apache.geode.security.ResourcePermission;
-
-public class CompactDiskStoreCommand implements GfshCommand {
-  @CliCommand(value = CliStrings.COMPACT_DISK_STORE, help = CliStrings.COMPACT_DISK_STORE__HELP)
-  @CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_DISKSTORE})
-  @ResourceOperation(resource = ResourcePermission.Resource.CLUSTER,
-      operation = ResourcePermission.Operation.MANAGE, target = ResourcePermission.Target.DISK)
-  public Result compactDiskStore(
-      @CliOption(key = CliStrings.COMPACT_DISK_STORE__NAME, mandatory = true,
-          optionContext = ConverterHint.DISKSTORE,
-          help = CliStrings.COMPACT_DISK_STORE__NAME__HELP) String diskStoreName,
-      @CliOption(key = {CliStrings.GROUP, CliStrings.GROUPS},
-          help = CliStrings.COMPACT_DISK_STORE__GROUP__HELP) String[] groups) {
-    Result result;
-
-    try {
-      // disk store exists validation
-      if (!diskStoreExists(diskStoreName)) {
-        result = ResultBuilder.createUserErrorResult(
-            CliStrings.format(CliStrings.COMPACT_DISK_STORE__DISKSTORE_0_DOES_NOT_EXIST,
-                new Object[] {diskStoreName}));
-      } else {
-        InternalDistributedSystem ds = getCache().getInternalDistributedSystem();
-
-        Map<DistributedMember, PersistentID> overallCompactInfo = new HashMap<>();
-
-        Set<?> otherMembers = ds.getDistributionManager().getOtherNormalDistributionManagerIds();
-        Set<InternalDistributedMember> allMembers = new HashSet<>();
-
-        for (Object member : otherMembers) {
-          allMembers.add((InternalDistributedMember) member);
-        }
-        allMembers.add(ds.getDistributedMember());
-
-        String groupInfo = "";
-        // if groups are specified, find members in the specified group
-        if (groups != null && groups.length > 0) {
-          groupInfo = CliStrings.format(CliStrings.COMPACT_DISK_STORE__MSG__FOR_GROUP,
-              new Object[] {Arrays.toString(groups) + "."});
-          final Set<InternalDistributedMember> selectedMembers = new HashSet<>();
-          List<String> targetedGroups = Arrays.asList(groups);
-          for (InternalDistributedMember member : allMembers) {
-            List<String> memberGroups = member.getGroups();
-            if (!Collections.disjoint(targetedGroups, memberGroups)) {
-              selectedMembers.add(member);
-            }
-          }
-
-          allMembers = selectedMembers;
-        }
-
-        // allMembers should not be empty when groups are not specified - it'll
-        // have at least one member
-        if (allMembers.isEmpty()) {
-          result = ResultBuilder.createUserErrorResult(
-              CliStrings.format(CliStrings.COMPACT_DISK_STORE__NO_MEMBERS_FOUND_IN_SPECIFED_GROUP,
-                  new Object[] {Arrays.toString(groups)}));
-        } else {
-          // first invoke on local member if it exists in the targeted set
-          if (allMembers.remove(ds.getDistributedMember())) {
-            PersistentID compactedDiskStoreId = CompactRequest.compactDiskStore(diskStoreName);
-            if (compactedDiskStoreId != null) {
-              overallCompactInfo.put(ds.getDistributedMember(), compactedDiskStoreId);
-            }
-          }
-
-          // was this local member the only one? Then don't try to send
-          // CompactRequest. Otherwise, send the request to others
-          if (!allMembers.isEmpty()) {
-            // Invoke compact on all 'other' members
-            Map<DistributedMember, PersistentID> memberCompactInfo =
-                CompactRequest.send(ds.getDistributionManager(), diskStoreName, allMembers);
-            if (memberCompactInfo != null && !memberCompactInfo.isEmpty()) {
-              overallCompactInfo.putAll(memberCompactInfo);
-              memberCompactInfo.clear();
-            }
-            String notExecutedMembers = CompactRequest.getNotExecutedMembers();
-            if (notExecutedMembers != null && !notExecutedMembers.isEmpty()) {
-              LogWrapper.getInstance()
-                  .info("compact disk-store \"" + diskStoreName
-                      + "\" message was scheduled to be sent to " + notExecutedMembers
-                      + ", but was not sent.");
-            }
-          }
-
-          // If compaction happened at all, then prepare the summary
-          if (overallCompactInfo != null && !overallCompactInfo.isEmpty()) {
-            CompositeResultData compositeResultData = ResultBuilder.createCompositeResultData();
-            CompositeResultData.SectionResultData section;
-
-            Set<Map.Entry<DistributedMember, PersistentID>> entries = overallCompactInfo.entrySet();
-
-            for (Map.Entry<DistributedMember, PersistentID> entry : entries) {
-              String memberId = entry.getKey().getId();
-              section = compositeResultData.addSection(memberId);
-              section.addData("On Member", memberId);
-
-              PersistentID persistentID = entry.getValue();
-              if (persistentID != null) {
-                CompositeResultData.SectionResultData subSection =
-                    section.addSection("DiskStore" + memberId);
-                subSection.addData("UUID", persistentID.getUUID());
-                subSection.addData("Host", persistentID.getHost().getHostName());
-                subSection.addData("Directory", persistentID.getDirectory());
-              }
-            }
-            compositeResultData.setHeader("Compacted " + diskStoreName + groupInfo);
-            result = ResultBuilder.buildResult(compositeResultData);
-          } else {
-            result = ResultBuilder.createInfoResult(
-                CliStrings.COMPACT_DISK_STORE__COMPACTION_ATTEMPTED_BUT_NOTHING_TO_COMPACT);
-          }
-        } // all members' if
-      } // disk store exists' if
-    } catch (RuntimeException e) {
-      LogWrapper.getInstance().info(e.getMessage(), e);
-      result = ResultBuilder.createGemFireErrorResult(
-          CliStrings.format(CliStrings.COMPACT_DISK_STORE__ERROR_WHILE_COMPACTING_REASON_0,
-              new Object[] {e.getMessage()}));
-    }
-    return result;
-  }
-
-  private boolean diskStoreExists(String diskStoreName) {
-    InternalCache cache = getCache();
-    ManagementService managementService = ManagementService.getExistingManagementService(cache);
-    DistributedSystemMXBean dsMXBean = managementService.getDistributedSystemMXBean();
-    Map<String, String[]> diskstore = dsMXBean.listMemberDiskstore();
-
-    Set<Map.Entry<String, String[]>> entrySet = diskstore.entrySet();
-
-    for (Map.Entry<String, String[]> entry : entrySet) {
-      String[] value = entry.getValue();
-      if (CliUtil.contains(value, diskStoreName)) {
-        return true;
-      }
-    }
-    return false;
-  }
-}
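
For context, the deleted class above implemented the online disk-store compaction command exposed through gfsh. A rough sketch of an invocation (the disk store and group names are hypothetical, and the option syntax assumes the usual gfsh naming behind these CliStrings constants):

    gfsh> compact disk-store --name=myDiskStore --group=serverGroupA

Without --group, the command targets every normal member in the distributed system, matching the allMembers handling in the method above.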

http://git-wip-us.apache.org/repos/asf/geode/blob/645a32d0/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactOfflineDiskStoreCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactOfflineDiskStoreCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactOfflineDiskStoreCommand.java
deleted file mode 100644
index ae73440..0000000
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CompactOfflineDiskStoreCommand.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.management.internal.cli.commands;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import org.apache.geode.GemFireIOException;
-import org.apache.geode.management.cli.CliMetaData;
-import org.apache.geode.management.cli.Result;
-import org.apache.geode.management.internal.cli.CliUtil;
-import org.apache.geode.management.internal.cli.GfshParser;
-import org.apache.geode.management.internal.cli.LogWrapper;
-import org.apache.geode.management.internal.cli.i18n.CliStrings;
-import org.apache.geode.management.internal.cli.result.ResultBuilder;
-import org.apache.geode.management.internal.cli.shell.Gfsh;
-import org.apache.geode.management.internal.cli.util.DiskStoreCompacter;
-
-public class CompactOfflineDiskStoreCommand implements GfshCommand {
-  @CliCommand(value = CliStrings.COMPACT_OFFLINE_DISK_STORE,
-      help = CliStrings.COMPACT_OFFLINE_DISK_STORE__HELP)
-  @CliMetaData(shellOnly = true, relatedTopic = {CliStrings.TOPIC_GEODE_DISKSTORE})
-  public Result compactOfflineDiskStore(
-      @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME, mandatory = true,
-          help = CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME__HELP) String diskStoreName,
-      @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS, mandatory = true,
-          help = CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS__HELP) String[] diskDirs,
-      @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE,
-          unspecifiedDefaultValue = "-1",
-          help = CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE__HELP) long maxOplogSize,
-      @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__J,
-          help = CliStrings.COMPACT_OFFLINE_DISK_STORE__J__HELP) String[] jvmProps) {
-    Result result;
-    LogWrapper logWrapper = LogWrapper.getInstance();
-
-    StringBuilder output = new StringBuilder();
-    StringBuilder error = new StringBuilder();
-    StringBuilder errorMessage = new StringBuilder();
-    Process compacterProcess = null;
-
-    try {
-      String validatedDirectories = DiskStoreCommandsUtils.validatedDirectories(diskDirs);
-      if (validatedDirectories != null) {
-        throw new IllegalArgumentException(
-            "Could not find " + CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS + ": \""
-                + validatedDirectories + "\"");
-      }
-
-      List<String> commandList = new ArrayList<>();
-      commandList.add(System.getProperty("java.home") + File.separatorChar + "bin"
-          + File.separatorChar + "java");
-
-      DiskStoreCommandsUtils.configureLogging(commandList);
-
-      if (jvmProps != null && jvmProps.length != 0) {
-        commandList.addAll(Arrays.asList(jvmProps));
-      }
-      commandList.add("-classpath");
-      commandList.add(System.getProperty("java.class.path", "."));
-      commandList.add(DiskStoreCompacter.class.getName());
-
-      commandList.add(CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME + "=" + diskStoreName);
-
-      if (diskDirs != null && diskDirs.length != 0) {
-        StringBuilder builder = new StringBuilder();
-        int arrayLength = diskDirs.length;
-        for (int i = 0; i < arrayLength; i++) {
-          if (File.separatorChar == '\\') {
-            builder.append(diskDirs[i].replace("\\", "/")); // see 46120
-          } else {
-            builder.append(diskDirs[i]);
-          }
-          if (i + 1 != arrayLength) {
-            builder.append(',');
-          }
-        }
-        commandList.add(CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS + "=" + builder.toString());
-      }
-      // -1 is ignore as maxOplogSize
-      commandList.add(CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE + "=" + maxOplogSize);
-
-      ProcessBuilder procBuilder = new ProcessBuilder(commandList);
-      compacterProcess = procBuilder.start();
-      InputStream inputStream = compacterProcess.getInputStream();
-      InputStream errorStream = compacterProcess.getErrorStream();
-      BufferedReader inputReader = new BufferedReader(new InputStreamReader(inputStream));
-      BufferedReader errorReader = new BufferedReader(new InputStreamReader(errorStream));
-
-      String line;
-      while ((line = inputReader.readLine()) != null) {
-        output.append(line).append(GfshParser.LINE_SEPARATOR);
-      }
-
-      boolean switchToStackTrace = false;
-      while ((line = errorReader.readLine()) != null) {
-        if (!switchToStackTrace && DiskStoreCompacter.STACKTRACE_START.equals(line)) {
-          switchToStackTrace = true;
-        } else if (switchToStackTrace) {
-          error.append(line).append(GfshParser.LINE_SEPARATOR);
-        } else {
-          errorMessage.append(line);
-        }
-      }
-
-      if (errorMessage.length() > 0) {
-        throw new GemFireIOException(errorMessage.toString());
-      }
-
-      // do we have to waitFor??
-      compacterProcess.destroy();
-      result = ResultBuilder.createInfoResult(output.toString());
-    } catch (IOException e) {
-      if (output.length() != 0) {
-        Gfsh.println(output.toString());
-      }
-      String fieldsMessage = (maxOplogSize != -1
-          ? CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE + "=" + maxOplogSize + "," : "");
-      fieldsMessage += CliUtil.arrayToString(diskDirs);
-      String errorString = CliStrings.format(
-          CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__ERROR_WHILE_COMPACTING_DISKSTORE_0_WITH_1_REASON_2,
-          diskStoreName, fieldsMessage);
-      result = ResultBuilder.createUserErrorResult(errorString);
-      if (logWrapper.fineEnabled()) {
-        logWrapper.fine(e.getMessage(), e);
-      }
-    } catch (GemFireIOException e) {
-      if (output.length() != 0) {
-        Gfsh.println(output.toString());
-      }
-      result = ResultBuilder.createUserErrorResult(errorMessage.toString());
-      if (logWrapper.fineEnabled()) {
-        logWrapper.fine(error.toString());
-      }
-    } catch (IllegalArgumentException e) {
-      if (output.length() != 0) {
-        Gfsh.println(output.toString());
-      }
-      result = ResultBuilder.createUserErrorResult(e.getMessage());
-    } finally {
-      if (compacterProcess != null) {
-        try {
-          // just to check whether the process has exited
-          // Process.exitValue() throws IllegalThreadStateException if Process
-          // is alive
-          compacterProcess.exitValue();
-        } catch (IllegalThreadStateException ise) {
-          // not yet terminated, destroy the process
-          compacterProcess.destroy();
-        }
-      }
-    }
-    return result;
-  }
-}
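
The deleted class above implemented the shell-only offline compaction command: it forks a separate JVM that runs DiskStoreCompacter against the supplied directories. A rough sketch of an invocation (paths and sizes are hypothetical; option names assume the usual gfsh naming behind these CliStrings constants):

    gfsh> compact offline-disk-store --name=myDiskStore --disk-dirs=/data/ds1,/data/ds2 --max-oplog-size=512

A max-oplog-size of -1 (the unspecified default in the code above) tells the compacter to ignore the setting.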

http://git-wip-us.apache.org/repos/asf/geode/blob/645a32d0/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
new file mode 100644
index 0000000..a8afa7d
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigCommands.java
@@ -0,0 +1,490 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.management.internal.cli.commands;
+
+import static org.apache.geode.distributed.ConfigurationProperties.STATISTIC_SAMPLING_ENABLED;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.geode.SystemFailure;
+import org.apache.geode.cache.execute.FunctionInvocationTargetException;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.internal.cache.xmlcache.CacheXml;
+import org.apache.geode.internal.logging.LogService;
+import org.apache.geode.internal.logging.log4j.LogLevel;
+import org.apache.geode.management.cli.CliMetaData;
+import org.apache.geode.management.cli.ConverterHint;
+import org.apache.geode.management.cli.Result;
+import org.apache.geode.management.internal.cli.AbstractCliAroundInterceptor;
+import org.apache.geode.management.internal.cli.CliUtil;
+import org.apache.geode.management.internal.cli.GfshParseResult;
+import org.apache.geode.management.internal.cli.domain.MemberConfigurationInfo;
+import org.apache.geode.management.internal.cli.functions.AlterRuntimeConfigFunction;
+import org.apache.geode.management.internal.cli.functions.CliFunctionResult;
+import org.apache.geode.management.internal.cli.functions.ExportConfigFunction;
+import org.apache.geode.management.internal.cli.functions.GetMemberConfigInformationFunction;
+import org.apache.geode.management.internal.cli.i18n.CliStrings;
+import org.apache.geode.management.internal.cli.result.CommandResultException;
+import org.apache.geode.management.internal.cli.result.CompositeResultData;
+import org.apache.geode.management.internal.cli.result.CompositeResultData.SectionResultData;
+import org.apache.geode.management.internal.cli.result.ErrorResultData;
+import org.apache.geode.management.internal.cli.result.InfoResultData;
+import org.apache.geode.management.internal.cli.result.ResultBuilder;
+import org.apache.geode.management.internal.cli.result.TabularResultData;
+import org.apache.geode.management.internal.cli.shell.Gfsh;
+import org.apache.geode.management.internal.configuration.domain.XmlEntity;
+import org.apache.geode.management.internal.security.ResourceOperation;
+import org.apache.geode.security.ResourcePermission.Operation;
+import org.apache.geode.security.ResourcePermission.Resource;
+import org.apache.logging.log4j.Logger;
+import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
+import org.springframework.shell.core.annotation.CliCommand;
+import org.springframework.shell.core.annotation.CliOption;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TreeSet;
+
+/****
+ * @since GemFire 7.0
+ *
+ */
+public class ConfigCommands implements GfshCommand {
+  private final ExportConfigFunction exportConfigFunction = new ExportConfigFunction();
+  private final GetMemberConfigInformationFunction getMemberConfigFunction =
+      new GetMemberConfigInformationFunction();
+  private final AlterRuntimeConfigFunction alterRunTimeConfigFunction =
+      new AlterRuntimeConfigFunction();
+  private static Logger logger = LogService.getLogger();
+
+  @CliCommand(value = {CliStrings.DESCRIBE_CONFIG}, help = CliStrings.DESCRIBE_CONFIG__HELP)
+  @CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_CONFIG})
+  @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
+  public Result describeConfig(
+      @CliOption(key = CliStrings.MEMBER, optionContext = ConverterHint.ALL_MEMBER_IDNAME,
+          help = CliStrings.DESCRIBE_CONFIG__MEMBER__HELP, mandatory = true) String memberNameOrId,
+      @CliOption(key = CliStrings.DESCRIBE_CONFIG__HIDE__DEFAULTS,
+          help = CliStrings.DESCRIBE_CONFIG__HIDE__DEFAULTS__HELP, unspecifiedDefaultValue = "true",
+          specifiedDefaultValue = "true") boolean hideDefaults) {
+
+    Result result = null;
+    try {
+      DistributedMember targetMember = null;
+
+      if (memberNameOrId != null && !memberNameOrId.isEmpty()) {
+        targetMember = CliUtil.getDistributedMemberByNameOrId(memberNameOrId);
+      }
+      if (targetMember != null) {
+        ResultCollector<?, ?> rc = CliUtil.executeFunction(getMemberConfigFunction,
+            new Boolean(hideDefaults), targetMember);
+        ArrayList<?> output = (ArrayList<?>) rc.getResult();
+        Object obj = output.get(0);
+
+        if (obj != null && obj instanceof MemberConfigurationInfo) {
+          MemberConfigurationInfo memberConfigInfo = (MemberConfigurationInfo) obj;
+
+          CompositeResultData crd = ResultBuilder.createCompositeResultData();
+          crd.setHeader(
+              CliStrings.format(CliStrings.DESCRIBE_CONFIG__HEADER__TEXT, memberNameOrId));
+
+          List<String> jvmArgsList = memberConfigInfo.getJvmInputArguments();
+          TabularResultData jvmInputArgs = crd.addSection().addSection().addTable();
+
+          for (String jvmArg : jvmArgsList) {
+            jvmInputArgs.accumulate("JVM command line arguments", jvmArg);
+          }
+
+          addSection(crd, memberConfigInfo.getGfePropsSetUsingApi(),
+              "GemFire properties defined using the API");
+          addSection(crd, memberConfigInfo.getGfePropsRuntime(),
+              "GemFire properties defined at the runtime");
+          addSection(crd, memberConfigInfo.getGfePropsSetFromFile(),
+              "GemFire properties defined with the property file");
+          addSection(crd, memberConfigInfo.getGfePropsSetWithDefaults(),
+              "GemFire properties using default values");
+          addSection(crd, memberConfigInfo.getCacheAttributes(), "Cache attributes");
+
+          List<Map<String, String>> cacheServerAttributesList =
+              memberConfigInfo.getCacheServerAttributes();
+
+          if (cacheServerAttributesList != null && !cacheServerAttributesList.isEmpty()) {
+            SectionResultData cacheServerSection = crd.addSection();
+            cacheServerSection.setHeader("Cache-server attributes");
+
+            for (Map<String, String> cacheServerAttributes : cacheServerAttributesList) {
+              addSubSection(cacheServerSection, cacheServerAttributes, "");
+            }
+          }
+          result = ResultBuilder.buildResult(crd);
+        }
+
+      } else {
+        ErrorResultData erd = ResultBuilder.createErrorResultData();
+        erd.addLine(CliStrings.format(CliStrings.DESCRIBE_CONFIG__MEMBER__NOT__FOUND,
+            new Object[] {memberNameOrId}));
+        result = ResultBuilder.buildResult(erd);
+      }
+    } catch (FunctionInvocationTargetException e) {
+      result = ResultBuilder.createGemFireErrorResult(CliStrings
+          .format(CliStrings.COULD_NOT_EXECUTE_COMMAND_TRY_AGAIN, CliStrings.DESCRIBE_CONFIG));
+    } catch (Exception e) {
+      ErrorResultData erd = ResultBuilder.createErrorResultData();
+      erd.addLine(e.getMessage());
+      result = ResultBuilder.buildResult(erd);
+    }
+    return result;
+  }
+
+
+  private void addSection(CompositeResultData crd, Map<String, String> attrMap, String headerText) {
+    if (attrMap != null && !attrMap.isEmpty()) {
+      SectionResultData section = crd.addSection();
+      section.setHeader(headerText);
+      section.addSeparator('.');
+      Set<String> attributes = new TreeSet<>(attrMap.keySet());
+
+      for (String attribute : attributes) {
+        String attributeValue = attrMap.get(attribute);
+        section.addData(attribute, attributeValue);
+      }
+    }
+  }
+
+  private void addSubSection(SectionResultData section, Map<String, String> attrMap,
+      String headerText) {
+    if (!attrMap.isEmpty()) {
+      SectionResultData subSection = section.addSection();
+      Set<String> attributes = new TreeSet<>(attrMap.keySet());
+      subSection.setHeader(headerText);
+
+      for (String attribute : attributes) {
+        String attributeValue = attrMap.get(attribute);
+        subSection.addData(attribute, attributeValue);
+      }
+    }
+  }
+
+  /**
+   * Export the cache configuration in XML format.
+   *
+   * @param member Member for which to write the configuration
+   * @param group Group or groups for which to write the configuration
+   * @return Results of the attempt to write the configuration
+   */
+  @CliCommand(value = {CliStrings.EXPORT_CONFIG}, help = CliStrings.EXPORT_CONFIG__HELP)
+  @CliMetaData(
+      interceptor = "org.apache.geode.management.internal.cli.commands.ConfigCommands$Interceptor",
+      relatedTopic = {CliStrings.TOPIC_GEODE_CONFIG})
+  @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
+  public Result exportConfig(
+      @CliOption(key = {CliStrings.MEMBER, CliStrings.MEMBERS},
+          optionContext = ConverterHint.ALL_MEMBER_IDNAME,
+          help = CliStrings.EXPORT_CONFIG__MEMBER__HELP) String[] member,
+      @CliOption(key = {CliStrings.GROUP, CliStrings.GROUPS},
+          optionContext = ConverterHint.MEMBERGROUP,
+          help = CliStrings.EXPORT_CONFIG__GROUP__HELP) String[] group,
+      @CliOption(key = {CliStrings.EXPORT_CONFIG__DIR},
+          help = CliStrings.EXPORT_CONFIG__DIR__HELP) String dir) {
+    InfoResultData infoData = ResultBuilder.createInfoResultData();
+
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(group, member);
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+    }
+
+    try {
+      ResultCollector<?, ?> rc =
+          CliUtil.executeFunction(this.exportConfigFunction, null, targetMembers);
+      List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>) rc.getResult());
+
+      for (CliFunctionResult result : results) {
+        if (result.getThrowable() != null) {
+          infoData.addLine(CliStrings.format(CliStrings.EXPORT_CONFIG__MSG__EXCEPTION,
+              result.getMemberIdOrName(), result.getThrowable()));
+        } else if (result.isSuccessful()) {
+          String cacheFileName = result.getMemberIdOrName() + "-cache.xml";
+          String propsFileName = result.getMemberIdOrName() + "-gf.properties";
+          String[] fileContent = (String[]) result.getSerializables();
+          infoData.addAsFile(cacheFileName, fileContent[0], "Downloading Cache XML file: {0}",
+              false);
+          infoData.addAsFile(propsFileName, fileContent[1], "Downloading properties file: {0}",
+              false);
+        }
+      }
+      return ResultBuilder.buildResult(infoData);
+    } catch (VirtualMachineError e) {
+      SystemFailure.initiateFailure(e);
+      throw e;
+    } catch (Throwable th) {
+      SystemFailure.checkFailure();
+      th.printStackTrace(System.err);
+      return ResultBuilder
+          .createGemFireErrorResult(CliStrings.format(CliStrings.EXPORT_CONFIG__MSG__EXCEPTION,
+              th.getClass().getName() + ": " + th.getMessage()));
+    }
+  }
+
+
+  @CliCommand(value = {CliStrings.ALTER_RUNTIME_CONFIG},
+      help = CliStrings.ALTER_RUNTIME_CONFIG__HELP)
+  @CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_CONFIG},
+      interceptor = "org.apache.geode.management.internal.cli.commands.ConfigCommands$AlterRuntimeInterceptor")
+  @ResourceOperation(resource = Resource.CLUSTER, operation = Operation.MANAGE)
+  public Result alterRuntimeConfig(
+      @CliOption(key = {CliStrings.MEMBER, CliStrings.MEMBERS},
+          optionContext = ConverterHint.ALL_MEMBER_IDNAME,
+          help = CliStrings.ALTER_RUNTIME_CONFIG__MEMBER__HELP) String[] memberNameOrId,
+      @CliOption(key = {CliStrings.GROUP, CliStrings.GROUPS},
+          optionContext = ConverterHint.MEMBERGROUP,
+          help = CliStrings.ALTER_RUNTIME_CONFIG__MEMBER__HELP) String[] group,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT__HELP) Integer archiveDiskSpaceLimit,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT__HELP) Integer archiveFileSizeLimit,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT__HELP) Integer logDiskSpaceLimit,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT__HELP) Integer logFileSizeLimit,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL},
+          optionContext = ConverterHint.LOG_LEVEL,
+          help = CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL__HELP) String logLevel,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE__HELP) String statisticArchiveFile,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE__HELP) Integer statisticSampleRate,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLING__ENABLED},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLING__ENABLED__HELP) Boolean statisticSamplingEnabled,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__COPY__ON__READ},
+          specifiedDefaultValue = "false",
+          help = CliStrings.ALTER_RUNTIME_CONFIG__COPY__ON__READ__HELP) Boolean setCopyOnRead,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__LOCK__LEASE},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__LOCK__LEASE__HELP) Integer lockLease,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__LOCK__TIMEOUT},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__LOCK__TIMEOUT__HELP) Integer lockTimeout,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__MESSAGE__SYNC__INTERVAL},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__MESSAGE__SYNC__INTERVAL__HELP) Integer messageSyncInterval,
+      @CliOption(key = {CliStrings.ALTER_RUNTIME_CONFIG__SEARCH__TIMEOUT},
+          help = CliStrings.ALTER_RUNTIME_CONFIG__SEARCH__TIMEOUT__HELP) Integer searchTimeout) {
+
+    Map<String, String> runTimeDistributionConfigAttributes = new HashMap<>();
+    Map<String, String> rumTimeCacheAttributes = new HashMap<>();
+    Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
+
+    if (targetMembers.isEmpty()) {
+      return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
+    }
+
+    if (archiveDiskSpaceLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__DISK__SPACE__LIMIT,
+          archiveDiskSpaceLimit.toString());
+    }
+
+    if (archiveFileSizeLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__ARCHIVE__FILE__SIZE__LIMIT,
+          archiveFileSizeLimit.toString());
+    }
+
+    if (logDiskSpaceLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, logDiskSpaceLimit.toString());
+    }
+
+    if (logFileSizeLimit != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__LOG__FILE__SIZE__LIMIT, logFileSizeLimit.toString());
+    }
+
+    if (logLevel != null && !logLevel.isEmpty()) {
+      runTimeDistributionConfigAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOG__LEVEL,
+          logLevel);
+    }
+
+    if (statisticArchiveFile != null && !statisticArchiveFile.isEmpty()) {
+      runTimeDistributionConfigAttributes
+          .put(CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__ARCHIVE__FILE, statisticArchiveFile);
+    }
+
+    if (statisticSampleRate != null) {
+      runTimeDistributionConfigAttributes.put(
+          CliStrings.ALTER_RUNTIME_CONFIG__STATISTIC__SAMPLE__RATE, statisticSampleRate.toString());
+    }
+
+    if (statisticSamplingEnabled != null) {
+      runTimeDistributionConfigAttributes.put(STATISTIC_SAMPLING_ENABLED,
+          statisticSamplingEnabled.toString());
+    }
+
+
+    // Attributes that are set on the cache.
+    if (setCopyOnRead != null) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__COPY__ON__READ,
+          setCopyOnRead.toString());
+    }
+
+    if (lockLease != null && lockLease > 0 && lockLease < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__LEASE,
+          lockLease.toString());
+    }
+
+    if (lockTimeout != null && lockTimeout > 0 && lockTimeout < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__TIMEOUT,
+          lockTimeout.toString());
+    }
+
+    if (messageSyncInterval != null && messageSyncInterval > 0
+        && messageSyncInterval < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__MESSAGE__SYNC__INTERVAL,
+          messageSyncInterval.toString());
+    }
+
+    if (searchTimeout != null && searchTimeout > 0 && searchTimeout < Integer.MAX_VALUE) {
+      rumTimeCacheAttributes.put(CliStrings.ALTER_RUNTIME_CONFIG__SEARCH__TIMEOUT,
+          searchTimeout.toString());
+    }
+
+    if (runTimeDistributionConfigAttributes.isEmpty() && rumTimeCacheAttributes.isEmpty()) {
+      return ResultBuilder
+          .createUserErrorResult(CliStrings.ALTER_RUNTIME_CONFIG__RELEVANT__OPTION__MESSAGE);
+    }
+
+    Map<String, String> allRunTimeAttributes = new HashMap<>();
+    allRunTimeAttributes.putAll(runTimeDistributionConfigAttributes);
+    allRunTimeAttributes.putAll(rumTimeCacheAttributes);
+
+    ResultCollector<?, ?> rc =
+        CliUtil.executeFunction(alterRunTimeConfigFunction, allRunTimeAttributes, targetMembers);
+    List<CliFunctionResult> results = CliFunctionResult.cleanResults((List<?>) rc.getResult());
+    Set<String> successfulMembers = new TreeSet<>();
+    Set<String> errorMessages = new TreeSet<>();
+
+    for (CliFunctionResult result : results) {
+      if (result.getThrowable() != null) {
+        logger.info("Function failed: " + result.getThrowable());
+        errorMessages.add(result.getThrowable().getMessage());
+      } else {
+        successfulMembers.add(result.getMemberIdOrName());
+      }
+    }
+    final String lineSeparator = System.getProperty("line.separator");
+    if (!successfulMembers.isEmpty()) {
+      StringBuilder successMessageBuilder = new StringBuilder();
+
+      successMessageBuilder.append(CliStrings.ALTER_RUNTIME_CONFIG__SUCCESS__MESSAGE);
+      successMessageBuilder.append(lineSeparator);
+
+      for (String member : successfulMembers) {
+        successMessageBuilder.append(member);
+        successMessageBuilder.append(lineSeparator);
+      }
+
+      Properties properties = new Properties();
+      properties.putAll(runTimeDistributionConfigAttributes);
+
+      Result result = ResultBuilder.createInfoResult(successMessageBuilder.toString());
+
+      // Set the Cache attributes to be modified
+      final XmlEntity xmlEntity = XmlEntity.builder().withType(CacheXml.CACHE)
+          .withAttributes(rumTimeCacheAttributes).build();
+      persistClusterConfiguration(result,
+          () -> getSharedConfiguration().modifyXmlAndProperties(properties, xmlEntity, group));
+      return result;
+    } else {
+      StringBuilder errorMessageBuilder = new StringBuilder();
+      errorMessageBuilder.append("Following errors occurred while altering runtime config");
+      errorMessageBuilder.append(lineSeparator);
+
+      for (String errorMessage : errorMessages) {
+        errorMessageBuilder.append(errorMessage);
+        errorMessageBuilder.append(lineSeparator);
+      }
+      return ResultBuilder.createUserErrorResult(errorMessageBuilder.toString());
+    }
+  }
+
+  public static class AlterRuntimeInterceptor extends AbstractCliAroundInterceptor {
+    @Override
+    public Result preExecution(GfshParseResult parseResult) {
+      Map<String, String> arguments = parseResult.getParamValueStrings();
+      // validate log level
+      String logLevel = arguments.get("log-level");
+      if (StringUtils.isNotBlank(logLevel) && (LogLevel.getLevel(logLevel) == null)) {
+        return ResultBuilder.createUserErrorResult("Invalid log level: " + logLevel);
+      }
+
+      return ResultBuilder.createInfoResult("");
+    }
+  }
+
+  /**
+   * Interceptor used by gfsh to intercept execution of export config command at "shell".
+   */
+  public static class Interceptor extends AbstractCliAroundInterceptor {
+    private String saveDirString;
+
+    @Override
+    public Result preExecution(GfshParseResult parseResult) {
+      Map<String, String> paramValueMap = parseResult.getParamValueStrings();
+      String dir = paramValueMap.get("dir");
+      dir = (dir == null) ? null : dir.trim();
+
+      File saveDirFile = new File(".");
+      if (dir != null && !dir.isEmpty()) {
+        saveDirFile = new File(dir);
+        if (saveDirFile.exists()) {
+          if (!saveDirFile.isDirectory())
+            return ResultBuilder.createGemFireErrorResult(
+                CliStrings.format(CliStrings.EXPORT_CONFIG__MSG__NOT_A_DIRECTORY, dir));
+        } else if (!saveDirFile.mkdirs()) {
+          return ResultBuilder.createGemFireErrorResult(
+              CliStrings.format(CliStrings.EXPORT_CONFIG__MSG__CANNOT_CREATE_DIR, dir));
+        }
+      }
+      try {
+        if (!saveDirFile.canWrite()) {
+          return ResultBuilder.createGemFireErrorResult(CliStrings.format(
+              CliStrings.EXPORT_CONFIG__MSG__NOT_WRITEABLE, saveDirFile.getCanonicalPath()));
+        }
+      } catch (IOException ioex) {
+        return ResultBuilder.createGemFireErrorResult(
+            CliStrings.format(CliStrings.EXPORT_CONFIG__MSG__NOT_WRITEABLE, saveDirFile.getName()));
+      }
+
+      saveDirString = saveDirFile.getAbsolutePath();
+      return ResultBuilder.createInfoResult("OK");
+    }
+
+    @Override
+    public Result postExecution(GfshParseResult parseResult, Result commandResult, Path tempFile) {
+      if (commandResult.hasIncomingFiles()) {
+        try {
+          commandResult.saveIncomingFiles(saveDirString);
+        } catch (IOException ioex) {
+          Gfsh.getCurrentInstance().logSevere("Unable to export config", ioex);
+        }
+      }
+
+      return commandResult;
+    }
+  }
+}
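
The re-added ConfigCommands class above bundles three commands: describe config, export config (with the Interceptor that resolves --dir before execution), and alter runtime (with the AlterRuntimeInterceptor that validates --log-level). Rough, hypothetical invocations (member, group, and directory values are illustrative; option names assume the usual gfsh naming behind these CliStrings constants):

    gfsh> describe config --member=server1
    gfsh> export config --group=groupA --dir=/tmp/config-export
    gfsh> alter runtime --member=server1 --log-level=fine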

http://git-wip-us.apache.org/repos/asf/geode/blob/645a32d0/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigurePDXCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigurePDXCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigurePDXCommand.java
deleted file mode 100644
index 8c490f4..0000000
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/ConfigurePDXCommand.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.management.internal.cli.commands;
-
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.Arrays;
-
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import org.apache.geode.internal.cache.CacheConfig;
-import org.apache.geode.internal.cache.xmlcache.CacheCreation;
-import org.apache.geode.internal.cache.xmlcache.CacheXml;
-import org.apache.geode.internal.cache.xmlcache.CacheXmlGenerator;
-import org.apache.geode.management.cli.CliMetaData;
-import org.apache.geode.management.cli.Result;
-import org.apache.geode.management.internal.cli.CliUtil;
-import org.apache.geode.management.internal.cli.i18n.CliStrings;
-import org.apache.geode.management.internal.cli.result.InfoResultData;
-import org.apache.geode.management.internal.cli.result.ResultBuilder;
-import org.apache.geode.management.internal.configuration.domain.XmlEntity;
-import org.apache.geode.management.internal.security.ResourceOperation;
-import org.apache.geode.pdx.ReflectionBasedAutoSerializer;
-import org.apache.geode.security.ResourcePermission;
-
-public class ConfigurePDXCommand implements GfshCommand {
-  @CliCommand(value = CliStrings.CONFIGURE_PDX, help = CliStrings.CONFIGURE_PDX__HELP)
-  @CliMetaData(relatedTopic = CliStrings.TOPIC_GEODE_REGION)
-  @ResourceOperation(resource = ResourcePermission.Resource.CLUSTER,
-      operation = ResourcePermission.Operation.MANAGE)
-  public Result configurePDX(
-      @CliOption(key = CliStrings.CONFIGURE_PDX__READ__SERIALIZED,
-          help = CliStrings.CONFIGURE_PDX__READ__SERIALIZED__HELP) Boolean readSerialized,
-      @CliOption(key = CliStrings.CONFIGURE_PDX__IGNORE__UNREAD_FIELDS,
-          help = CliStrings.CONFIGURE_PDX__IGNORE__UNREAD_FIELDS__HELP) Boolean ignoreUnreadFields,
-      @CliOption(key = CliStrings.CONFIGURE_PDX__DISKSTORE, specifiedDefaultValue = "",
-          help = CliStrings.CONFIGURE_PDX__DISKSTORE__HELP) String diskStore,
-      @CliOption(key = CliStrings.CONFIGURE_PDX__AUTO__SERIALIZER__CLASSES,
-          help = CliStrings.CONFIGURE_PDX__AUTO__SERIALIZER__CLASSES__HELP) String[] patterns,
-      @CliOption(key = CliStrings.CONFIGURE_PDX__PORTABLE__AUTO__SERIALIZER__CLASSES,
-          help = CliStrings.CONFIGURE_PDX__PORTABLE__AUTO__SERIALIZER__CLASSES__HELP) String[] portablePatterns) {
-    Result result;
-
-    try {
-      InfoResultData ird = ResultBuilder.createInfoResultData();
-      CacheCreation cache = new CacheCreation(true);
-
-      if ((portablePatterns != null && portablePatterns.length > 0)
-          && (patterns != null && patterns.length > 0)) {
-        return ResultBuilder.createUserErrorResult(CliStrings.CONFIGURE_PDX__ERROR__MESSAGE);
-      }
-      if (!CliUtil.getAllNormalMembers(CliUtil.getCacheIfExists()).isEmpty()) {
-        ird.addLine(CliStrings.CONFIGURE_PDX__NORMAL__MEMBERS__WARNING);
-      }
-      // Set persistent and the disk-store
-      if (diskStore != null) {
-        cache.setPdxPersistent(true);
-        ird.addLine(CliStrings.CONFIGURE_PDX__PERSISTENT + " = " + cache.getPdxPersistent());
-        if (!diskStore.equals("")) {
-          cache.setPdxDiskStore(diskStore);
-          ird.addLine(CliStrings.CONFIGURE_PDX__DISKSTORE + " = " + cache.getPdxDiskStore());
-        } else {
-          ird.addLine(CliStrings.CONFIGURE_PDX__DISKSTORE + " = " + "DEFAULT");
-        }
-      } else {
-        cache.setPdxPersistent(CacheConfig.DEFAULT_PDX_PERSISTENT);
-        ird.addLine(CliStrings.CONFIGURE_PDX__PERSISTENT + " = " + cache.getPdxPersistent());
-      }
-
-      // Set read-serialized
-      if (readSerialized != null) {
-        cache.setPdxReadSerialized(readSerialized);
-      } else {
-        cache.setPdxReadSerialized(CacheConfig.DEFAULT_PDX_READ_SERIALIZED);
-      }
-      ird.addLine(
-          CliStrings.CONFIGURE_PDX__READ__SERIALIZED + " = " + cache.getPdxReadSerialized());
-
-
-      // Set ignoreUnreadFields
-      if (ignoreUnreadFields != null) {
-        cache.setPdxIgnoreUnreadFields(ignoreUnreadFields);
-      } else {
-        cache.setPdxIgnoreUnreadFields(CacheConfig.DEFAULT_PDX_IGNORE_UNREAD_FIELDS);
-      }
-      ird.addLine(CliStrings.CONFIGURE_PDX__IGNORE__UNREAD_FIELDS + " = "
-          + cache.getPdxIgnoreUnreadFields());
-
-
-      if (portablePatterns != null) {
-        ReflectionBasedAutoSerializer autoSerializer =
-            new ReflectionBasedAutoSerializer(portablePatterns);
-        cache.setPdxSerializer(autoSerializer);
-        ird.addLine("PDX Serializer " + cache.getPdxSerializer().getClass().getName());
-        ird.addLine("Portable classes " + Arrays.toString(portablePatterns));
-      }
-
-      if (patterns != null) {
-        ReflectionBasedAutoSerializer nonPortableAutoSerializer =
-            new ReflectionBasedAutoSerializer(true, patterns);
-        cache.setPdxSerializer(nonPortableAutoSerializer);
-        ird.addLine("PDX Serializer : " + cache.getPdxSerializer().getClass().getName());
-        ird.addLine("Non portable classes :" + Arrays.toString(patterns));
-      }
-
-      final StringWriter stringWriter = new StringWriter();
-      final PrintWriter printWriter = new PrintWriter(stringWriter);
-      CacheXmlGenerator.generate(cache, printWriter, true, false, false);
-      printWriter.close();
-      String xmlDefinition = stringWriter.toString();
-      // TODO jbarrett - shouldn't this use the same loadXmlDefinition that other constructors use?
-      XmlEntity xmlEntity =
-          XmlEntity.builder().withType(CacheXml.PDX).withConfig(xmlDefinition).build();
-
-      result = ResultBuilder.buildResult(ird);
-      persistClusterConfiguration(result,
-          () -> getSharedConfiguration().addXmlEntity(xmlEntity, null));
-
-    } catch (Exception e) {
-      return ResultBuilder.createGemFireErrorResult(e.getMessage());
-    }
-    return result;
-  }
-}
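
The deleted class above implemented the configure pdx command, which builds a CacheCreation, generates the corresponding cache.xml PDX element, and persists it to the cluster configuration. A hypothetical invocation (the disk store name is illustrative; option names assume the usual gfsh naming behind these CliStrings constants):

    gfsh> configure pdx --read-serialized=true --disk-store=pdxDiskStore --ignore-unread-fields=false

Passing --disk-store with no value (the specifiedDefaultValue of "" above) marks PDX as persistent on the DEFAULT disk store.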

http://git-wip-us.apache.org/repos/asf/geode/blob/645a32d0/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CountDurableCQEventsCommand.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CountDurableCQEventsCommand.java b/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CountDurableCQEventsCommand.java
deleted file mode 100644
index 8ebc4da..0000000
--- a/geode-core/src/main/java/org/apache/geode/management/internal/cli/commands/CountDurableCQEventsCommand.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.management.internal.cli.commands;
-
-import java.util.List;
-import java.util.Set;
-
-import org.springframework.shell.core.annotation.CliCommand;
-import org.springframework.shell.core.annotation.CliOption;
-
-import org.apache.geode.cache.execute.ResultCollector;
-import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.management.cli.CliMetaData;
-import org.apache.geode.management.cli.ConverterHint;
-import org.apache.geode.management.cli.Result;
-import org.apache.geode.management.internal.cli.CliUtil;
-import org.apache.geode.management.internal.cli.domain.SubscriptionQueueSizeResult;
-import org.apache.geode.management.internal.cli.functions.GetSubscriptionQueueSizeFunction;
-import org.apache.geode.management.internal.cli.i18n.CliStrings;
-import org.apache.geode.management.internal.cli.result.ResultBuilder;
-import org.apache.geode.management.internal.security.ResourceOperation;
-import org.apache.geode.security.ResourcePermission;
-
-public class CountDurableCQEventsCommand implements GfshCommand {
-  DurableClientCommandsResultBuilder builder = new DurableClientCommandsResultBuilder();
-
-  @CliCommand(value = CliStrings.COUNT_DURABLE_CQ_EVENTS,
-      help = CliStrings.COUNT_DURABLE_CQ_EVENTS__HELP)
-  @CliMetaData()
-  @ResourceOperation(resource = ResourcePermission.Resource.CLUSTER,
-      operation = ResourcePermission.Operation.READ)
-  public Result countDurableCqEvents(
-      @CliOption(key = CliStrings.COUNT_DURABLE_CQ_EVENTS__DURABLE__CLIENT__ID, mandatory = true,
-          help = CliStrings.COUNT_DURABLE_CQ_EVENTS__DURABLE__CLIENT__ID__HELP) final String durableClientId,
-      @CliOption(key = CliStrings.COUNT_DURABLE_CQ_EVENTS__DURABLE__CQ__NAME,
-          help = CliStrings.COUNT_DURABLE_CQ_EVENTS__DURABLE__CQ__NAME__HELP) final String cqName,
-      @CliOption(key = {CliStrings.MEMBER, CliStrings.MEMBERS},
-          help = CliStrings.COUNT_DURABLE_CQ_EVENTS__MEMBER__HELP,
-          optionContext = ConverterHint.MEMBERIDNAME) final String[] memberNameOrId,
-      @CliOption(key = {CliStrings.GROUP, CliStrings.GROUPS},
-          help = CliStrings.COUNT_DURABLE_CQ_EVENTS__GROUP__HELP,
-          optionContext = ConverterHint.MEMBERGROUP) final String[] group) {
-
-    Result result;
-    try {
-      Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrId);
-
-      if (targetMembers.isEmpty()) {
-        return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
-      }
-
-      String[] params = new String[2];
-      params[0] = durableClientId;
-      params[1] = cqName;
-      final ResultCollector<?, ?> rc =
-          CliUtil.executeFunction(new GetSubscriptionQueueSizeFunction(), params, targetMembers);
-      final List<SubscriptionQueueSizeResult> funcResults =
-          (List<SubscriptionQueueSizeResult>) rc.getResult();
-
-      String queueSizeColumnName;
-
-      if (cqName != null && !cqName.isEmpty()) {
-        queueSizeColumnName = CliStrings
-            .format(CliStrings.COUNT_DURABLE_CQ_EVENTS__SUBSCRIPTION__QUEUE__SIZE__CLIENT, cqName);
-      } else {
-        queueSizeColumnName = CliStrings.format(
-            CliStrings.COUNT_DURABLE_CQ_EVENTS__SUBSCRIPTION__QUEUE__SIZE__CLIENT, durableClientId);
-      }
-      result = builder.buildTableResultForQueueSize(funcResults, queueSizeColumnName);
-    } catch (Exception e) {
-      result = ResultBuilder.createGemFireErrorResult(e.getMessage());
-    }
-    return result;
-  }
-}
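
The deleted class above implemented the durable CQ event counting command. A hypothetical invocation (client and CQ names are illustrative; option names assume the usual gfsh naming behind these CliStrings constants):

    gfsh> count durable-cq-events --durable-client-id=client1 --durable-cq-name=myCq

When the CQ name is omitted, the queue-size column in the result table is labeled with the durable client id instead, as in the branch above.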