Posted to commits@geode.apache.org by mh...@apache.org on 2020/06/30 17:37:20 UTC

[geode] branch support/1.13 updated: GEODE-8095: Changes to make GEODE Respond to Restore Redundancy REST Command (#5300) (#5327)

This is an automated email from the ASF dual-hosted git repository.

mhanson pushed a commit to branch support/1.13
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/support/1.13 by this push:
     new 9255163  GEODE-8095: Changes to make GEODE Respond to Restore Redundancy REST Command (#5300) (#5327)
9255163 is described below

commit 9255163a026d5571a89ba6aad950b75539261414
Author: mhansonp <ha...@vmware.com>
AuthorDate: Tue Jun 30 10:35:55 2020 -0700

    GEODE-8095: Changes to make GEODE Respond to Restore Redundancy REST Command (#5300) (#5327)
    
    * GEODE-8095: Changes to make GEODE Respond to Restore Redundancy
    
    (cherry picked from commit f5c5e2cc7860c132074a16351ca4db847f64d6f7)
    
    Co-authored-by: Jinmei Liao <ji...@vmware.com>
---
 .../rest/RestoreRedundancyManagementDUnitTest.java | 345 +++++++++++++++++++++
 .../internal/operation/OperationManager.java       |   2 +
 ...rializableRestoreRedundancyResultsImplTest.java |   5 +-
 ...RedundancyRequestControllerIntegrationTest.java | 174 +++++++++++
 .../RestoreRedundancyOperationController.java      |  74 +++++
 5 files changed, 596 insertions(+), 4 deletions(-)

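For context (illustrative, not part of the patch below): the new operation can be driven end to end from the Java ClusterManagementService client, as the DUnit test in this commit exercises. A minimal sketch, assuming a locator whose HTTP service is reachable on localhost:7070; host, port, and region names are placeholders:

    import java.util.Arrays;

    import org.apache.geode.management.api.ClusterManagementOperationResult;
    import org.apache.geode.management.api.ClusterManagementService;
    import org.apache.geode.management.client.ClusterManagementServiceBuilder;
    import org.apache.geode.management.operation.RestoreRedundancyRequest;
    import org.apache.geode.management.runtime.RestoreRedundancyResults;

    public class RestoreRedundancyClientSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the locator's HTTP service (host and port are placeholders).
        ClusterManagementService client = new ClusterManagementServiceBuilder()
            .setHost("localhost")
            .setPort(7070)
            .build();

        // Optionally restrict the operation to specific regions; these names are hypothetical.
        RestoreRedundancyRequest request = new RestoreRedundancyRequest();
        request.setIncludeRegions(Arrays.asList("highRedundancy", "lowRedundancy"));

        // Start the operation, then block on the returned future until it completes.
        ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults> start =
            client.start(request);
        RestoreRedundancyResults results =
            client.getFuture(request, start.getOperationId()).get().getOperationResult();
        System.out.println("Redundancy restored: " + results.getSuccess());

        client.close();
      }
    }

Over REST, the same operation is started with a POST to /v1/operations/restoreRedundancy, handled by the controller added at the end of this patch.
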
diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/management/internal/rest/RestoreRedundancyManagementDUnitTest.java b/geode-assembly/src/distributedTest/java/org/apache/geode/management/internal/rest/RestoreRedundancyManagementDUnitTest.java
new file mode 100644
index 0000000..3464fe7
--- /dev/null
+++ b/geode-assembly/src/distributedTest/java/org/apache/geode/management/internal/rest/RestoreRedundancyManagementDUnitTest.java
@@ -0,0 +1,345 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.geode.management.internal.rest;
+
+import static org.apache.geode.cache.PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT;
+import static org.apache.geode.cache.Region.SEPARATOR;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.IntStream;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionShortcut;
+import org.apache.geode.internal.cache.InternalCache;
+import org.apache.geode.internal.cache.PartitionAttributesImpl;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.management.api.ClusterManagementOperationResult;
+import org.apache.geode.management.api.ClusterManagementService;
+import org.apache.geode.management.client.ClusterManagementServiceBuilder;
+import org.apache.geode.management.operation.RestoreRedundancyRequest;
+import org.apache.geode.management.runtime.RegionRedundancyStatus;
+import org.apache.geode.management.runtime.RestoreRedundancyResults;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.rules.MemberStarterRule;
+
+/**
+ * This test borrows heavily from RestoreRedundancyCommandDUnitTest.
+ *
+ */
+
+public class RestoreRedundancyManagementDUnitTest {
+
+  @Rule
+  public ClusterStartupRule cluster = new ClusterStartupRule();
+
+  private MemberVM locator;
+  private List<MemberVM> servers;
+  private static final int SERVERS_TO_START = 3;
+  private static final String HIGH_REDUNDANCY_REGION_NAME = "highRedundancy";
+  private static final int HIGH_REDUNDANCY_COPIES = SERVERS_TO_START - 1;
+  private static final String LOW_REDUNDANCY_REGION_NAME = "lowRedundancy";
+  private static final String PARENT_REGION_NAME = "colocatedParent";
+  private static final String CHILD_REGION_NAME = "colocatedChild";
+  private static final int SINGLE_REDUNDANT_COPY = 1;
+  private static final String NO_CONFIGURED_REDUNDANCY_REGION_NAME = "noConfiguredRedundancy";
+
+  private ClusterManagementService client1;
+
+  @Before
+  public void setup() {
+    locator = cluster.startLocatorVM(0, MemberStarterRule::withHttpService);
+    servers = new ArrayList<>();
+    int locatorPort = locator.getPort();
+    IntStream.range(0, SERVERS_TO_START)
+        .forEach(i -> servers.add(cluster.startServerVM(i + 1, locatorPort)));
+
+    client1 = new ClusterManagementServiceBuilder()
+        .setHost("localhost")
+        .setPort(locator.getHttpPort())
+        .build();
+  }
+
+  @After
+  public void tearDown() {
+    client1.close();
+  }
+
+  @Test
+  public void restoreRedundancyWithNoArgumentsRestoresRedundancyForAllRegions()
+      throws ExecutionException, InterruptedException {
+
+    List<String> regionNames = getAllRegionNames();
+    createAndPopulateRegions(regionNames);
+
+    int numberOfServers = servers.size();
+    regionNames.forEach(region -> locator
+        .waitUntilRegionIsReadyOnExactlyThisManyServers(SEPARATOR + region, numberOfServers));
+
+    RestoreRedundancyRequest restoreRedundancyRequest = new RestoreRedundancyRequest();
+
+    restoreRedundancyRequest.setIncludeRegions(regionNames);
+
+    verifyClusterManagementOperationRequestAndResponse(restoreRedundancyRequest);
+
+    // Confirm all regions have their configured redundancy and that primaries were balanced
+    int numberOfActiveServers = servers.size();
+    servers.get(0).invoke(() -> {
+      for (String regionName : regionNames) {
+        assertRedundancyStatusForRegion(regionName, true);
+        assertPrimariesBalanced(regionName, numberOfActiveServers, true);
+      }
+    });
+  }
+
+  // Helper methods
+  private void verifyClusterManagementOperationRequestAndResponse(
+      RestoreRedundancyRequest restoreRedundancyRequest)
+      throws InterruptedException, ExecutionException {
+    ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults> startResult =
+        client1.start(restoreRedundancyRequest);
+
+    assertThat(startResult.isSuccessful()).isTrue();
+
+    ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults> endResult =
+        client1.getFuture(restoreRedundancyRequest, startResult.getOperationId()).get();
+    RestoreRedundancyResults restoreRedundancyResult = endResult.getOperationResult();
+
+    assertThat(restoreRedundancyResult.getSuccess()).isTrue();
+
+    boolean found;
+    for (String regionName : restoreRedundancyRequest.getIncludeRegions()) {
+      found = false;
+      for (Map.Entry<String, RegionRedundancyStatus> region : restoreRedundancyResult
+          .getSatisfiedRedundancyRegionResults().entrySet()) {
+        if (region.getValue().getRegionName().compareTo(regionName) == 0) {
+          found = true;
+          break;
+        }
+      }
+      assertThat(found)
+          .describedAs("Satisfied Redundancy List contains region name is true for included")
+          .isTrue();
+    }
+
+    for (String regionName : restoreRedundancyRequest.getIncludeRegions()) {
+      for (Map.Entry<String, RegionRedundancyStatus> region : restoreRedundancyResult
+          .getUnderRedundancyRegionResults().entrySet()) {
+        assertThat(regionName)
+            .describedAs("One of regions we expect to be satisfied is marked as Under Redundancy")
+            .isNotEqualTo(region.getValue().getRegionName());
+      }
+    }
+
+    for (String regionName : restoreRedundancyRequest.getIncludeRegions()) {
+      for (Map.Entry<String, RegionRedundancyStatus> region : restoreRedundancyResult
+          .getZeroRedundancyRegionResults().entrySet()) {
+        assertThat(regionName)
+            .describedAs("One of regions we expect to be satisfied is marked as Zero Redundancy")
+            .isNotEqualTo(region.getValue().getRegionName());
+      }
+    }
+
+    List<String> filteredExclude;
+
+    if (restoreRedundancyRequest.getExcludeRegions() != null) {
+      filteredExclude = new ArrayList<>(restoreRedundancyRequest.getExcludeRegions());
+    } else {
+      filteredExclude = new ArrayList<>();
+    }
+
+    for (String regionName : restoreRedundancyRequest.getIncludeRegions()) {
+      filteredExclude.remove(regionName);
+    }
+
+    // Excluded regions should not appear in the satisfied redundancy results.
+    for (String regionName : filteredExclude) {
+      found = false;
+      for (Map.Entry<String, RegionRedundancyStatus> region : restoreRedundancyResult
+          .getSatisfiedRedundancyRegionResults().entrySet()) {
+        if (region.getValue().getRegionName().compareTo(regionName) == 0) {
+          found = true;
+          break;
+        }
+      }
+      assertThat(found)
+          .describedAs("Satisfied Redundancy List contains region name is false for excluded")
+          .isFalse();
+    }
+  }
+
+  private List<String> getAllRegionNames() {
+    List<String> regionNames = new ArrayList<>();
+    regionNames.add(HIGH_REDUNDANCY_REGION_NAME);
+    regionNames.add(LOW_REDUNDANCY_REGION_NAME);
+    regionNames.add(PARENT_REGION_NAME);
+    regionNames.add(CHILD_REGION_NAME);
+    regionNames.add(NO_CONFIGURED_REDUNDANCY_REGION_NAME);
+    return regionNames;
+  }
+
+  private void createAndPopulateRegions(List<String> regionNames) {
+    // Create regions on server 1 and populate them.
+    servers.get(0).invoke(() -> {
+      createRegions(regionNames);
+      populateRegions(regionNames);
+    });
+
+    // Create regions on the other servers. Since automatic recovery is disabled (recovery delay
+    // of -1), all buckets for these regions remain on server 1 and redundancy is zero.
+    servers.subList(1, servers.size()).forEach(s -> s.invoke(() -> {
+      createRegions(regionNames);
+    }));
+
+    int numberOfActiveServers = servers.size();
+    servers.get(0).invoke(() -> {
+      // Confirm that redundancy is impaired for all regions
+      for (String regionName : regionNames) {
+        assertRedundancyStatusForRegion(regionName, false);
+        assertPrimariesBalanced(regionName, numberOfActiveServers, false);
+      }
+    });
+  }
+
+  private static void createRegions(List<String> regionsToCreate) {
+    if (regionsToCreate.contains(HIGH_REDUNDANCY_REGION_NAME)) {
+      createHighRedundancyRegion();
+    }
+    if (regionsToCreate.contains(LOW_REDUNDANCY_REGION_NAME)) {
+      createLowRedundancyRegion();
+    }
+    // We have to create both colocated regions or neither
+    if (regionsToCreate.contains(PARENT_REGION_NAME)
+        || regionsToCreate.contains(CHILD_REGION_NAME)) {
+      createColocatedRegions();
+    }
+    if (regionsToCreate.contains(NO_CONFIGURED_REDUNDANCY_REGION_NAME)) {
+      createNoConfiguredRedundancyRegion();
+    }
+  }
+
+  private static void createHighRedundancyRegion() {
+    PartitionAttributesImpl attributes = getAttributes(HIGH_REDUNDANCY_COPIES);
+    InternalCache cache = Objects.requireNonNull(ClusterStartupRule.getCache());
+    cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(attributes)
+        .create(HIGH_REDUNDANCY_REGION_NAME);
+  }
+
+  private static void createLowRedundancyRegion() {
+    PartitionAttributesImpl attributes = getAttributes(SINGLE_REDUNDANT_COPY);
+    InternalCache cache = Objects.requireNonNull(ClusterStartupRule.getCache());
+    cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(attributes)
+        .create(LOW_REDUNDANCY_REGION_NAME);
+  }
+
+  private static void createColocatedRegions() {
+    PartitionAttributesImpl attributes = getAttributes(SINGLE_REDUNDANT_COPY);
+    InternalCache cache = Objects.requireNonNull(ClusterStartupRule.getCache());
+    // Create parent region
+    cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(attributes)
+        .create(PARENT_REGION_NAME);
+
+    // Create colocated region
+    attributes.setColocatedWith(PARENT_REGION_NAME);
+    cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(attributes)
+        .create(CHILD_REGION_NAME);
+  }
+
+  private static void createNoConfiguredRedundancyRegion() {
+    PartitionAttributesImpl attributes = getAttributes(0);
+    InternalCache cache = Objects.requireNonNull(ClusterStartupRule.getCache());
+    cache.createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(attributes)
+        .create(NO_CONFIGURED_REDUNDANCY_REGION_NAME);
+  }
+
+  private static PartitionAttributesImpl getAttributes(int redundantCopies) {
+    PartitionAttributesImpl attributes = new PartitionAttributesImpl();
+    attributes.setStartupRecoveryDelay(-1);
+    attributes.setRecoveryDelay(-1);
+    attributes.setRedundantCopies(redundantCopies);
+    return attributes;
+  }
+
+  private static void populateRegions(List<String> regionNames) {
+    if (regionNames.isEmpty()) {
+      return;
+    }
+    Cache cache = Objects.requireNonNull(ClusterStartupRule.getCache());
+
+    // Populate all the regions
+    regionNames.forEach(regionName -> {
+      Region<Object, Object> region = cache.getRegion(regionName);
+      IntStream.range(0, 5 * GLOBAL_MAX_BUCKETS_DEFAULT)
+          .forEach(i -> region.put("key" + i, "value" + i));
+    });
+  }
+
+  private static void assertRedundancyStatusForRegion(String regionName,
+      boolean shouldBeSatisfied) {
+    // Redundancy is always satisfied for a region with zero configured redundancy
+    if (regionName.equals(NO_CONFIGURED_REDUNDANCY_REGION_NAME)) {
+      return;
+    }
+    Cache cache = Objects.requireNonNull(ClusterStartupRule.getCache());
+
+    PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
+    Assert.assertThat(region.getRedundancyProvider().isRedundancyImpaired(),
+        is(!shouldBeSatisfied));
+  }
+
+  private static void assertPrimariesBalanced(String regionName, int numberOfServers,
+      boolean shouldBeBalanced) {
+    // Primaries cannot be balanced for regions with no redundant copies
+    if (regionName.equals(NO_CONFIGURED_REDUNDANCY_REGION_NAME)) {
+      return;
+    }
+    Cache cache = Objects.requireNonNull(ClusterStartupRule.getCache());
+
+    PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
+    int primariesOnServer = region.getLocalPrimaryBucketsListTestOnly().size();
+    // Add one to the expected number of primaries to allow for integer division truncation
+    int expectedPrimaries = (GLOBAL_MAX_BUCKETS_DEFAULT / numberOfServers) + 1;
+    // Because of the way reassigning primaries works, it is sometimes only possible to get the
+    // difference between the most loaded member and the least loaded member to be 2, not 1 as would
+    // be the case for perfect balance
+    String message = "Primaries should be balanced for region " + regionName
+        + ", but expectedPrimaries:actualPrimaries = "
+        + expectedPrimaries + ":" + primariesOnServer;
+    if (shouldBeBalanced) {
+      Assert.assertThat(message, Math.abs(primariesOnServer - expectedPrimaries),
+          is(lessThanOrEqualTo(2)));
+    } else {
+      Assert.assertThat("Primaries should not be balanced for region " + regionName,
+          Math.abs(primariesOnServer - expectedPrimaries), is(not(lessThanOrEqualTo(2))));
+    }
+  }
+}
diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/operation/OperationManager.java b/geode-core/src/main/java/org/apache/geode/management/internal/operation/OperationManager.java
index 7aa9aff..c5c7027 100644
--- a/geode-core/src/main/java/org/apache/geode/management/internal/operation/OperationManager.java
+++ b/geode-core/src/main/java/org/apache/geode/management/internal/operation/OperationManager.java
@@ -25,6 +25,7 @@ import org.apache.geode.internal.cache.InternalCache;
 import org.apache.geode.logging.internal.executors.LoggingExecutors;
 import org.apache.geode.management.api.ClusterManagementOperation;
 import org.apache.geode.management.operation.RebalanceOperation;
+import org.apache.geode.management.operation.RestoreRedundancyRequest;
 import org.apache.geode.management.runtime.OperationResult;
 
 @Experimental
@@ -42,6 +43,7 @@ public class OperationManager implements AutoCloseable {
     // initialize the list of operation performers
     performers = new ConcurrentHashMap<>();
     registerOperation(RebalanceOperation.class, new RebalanceOperationPerformer());
+    registerOperation(RestoreRedundancyRequest.class, new RestoreRedundancyPerformer());
   }
 
   /**
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/control/SerializableRestoreRedundancyResultsImplTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/control/SerializableRestoreRedundancyResultsImplTest.java
index 473ee30..f365c6d 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/control/SerializableRestoreRedundancyResultsImplTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/control/SerializableRestoreRedundancyResultsImplTest.java
@@ -210,14 +210,11 @@ public class SerializableRestoreRedundancyResultsImplTest {
     regionRedundancyStatus.setStatus(SATISFIED);
     restoreRedundancyResults.addRegionResult(regionRedundancyStatus);
     String jsonString = geodeMapper.writeValueAsString(restoreRedundancyResults);
-    // deserialize the class
-
 
+    // deserialize the class
     RestoreRedundancyResultsImpl value =
         geodeMapper.readValue(jsonString, RestoreRedundancyResultsImpl.class);
 
     assertThat(value).usingRecursiveComparison().isEqualTo(restoreRedundancyResults);
-
-
   }
 }
diff --git a/geode-web-management/src/integrationTest/java/org/apache/geode/management/internal/rest/RestoreRedundancyRequestControllerIntegrationTest.java b/geode-web-management/src/integrationTest/java/org/apache/geode/management/internal/rest/RestoreRedundancyRequestControllerIntegrationTest.java
new file mode 100644
index 0000000..1c7fbc0
--- /dev/null
+++ b/geode-web-management/src/integrationTest/java/org/apache/geode/management/internal/rest/RestoreRedundancyRequestControllerIntegrationTest.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.geode.management.internal.rest;
+
+import static org.apache.geode.management.configuration.Links.URI_VERSION;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.core.StringContains.containsString;
+import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
+import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
+
+import java.util.concurrent.CompletableFuture;
+
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.test.annotation.DirtiesContext;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringRunner;
+import org.springframework.test.context.web.WebAppConfiguration;
+import org.springframework.test.web.servlet.ResultActions;
+import org.springframework.test.web.servlet.ResultMatcher;
+import org.springframework.web.client.RestTemplate;
+import org.springframework.web.context.WebApplicationContext;
+
+import org.apache.geode.management.api.ClusterManagementListOperationsResult;
+import org.apache.geode.management.api.ClusterManagementOperationResult;
+import org.apache.geode.management.api.ClusterManagementService;
+import org.apache.geode.management.api.ClusterManagementServiceTransport;
+import org.apache.geode.management.api.RestTemplateClusterManagementServiceTransport;
+import org.apache.geode.management.client.ClusterManagementServiceBuilder;
+import org.apache.geode.management.operation.RestoreRedundancyRequest;
+import org.apache.geode.management.runtime.RestoreRedundancyResults;
+
+@RunWith(SpringRunner.class)
+@ContextConfiguration(locations = {"classpath*:WEB-INF/management-servlet.xml"},
+    loader = PlainLocatorContextLoader.class)
+@WebAppConfiguration
+@DirtiesContext(classMode = DirtiesContext.ClassMode.AFTER_CLASS)
+public class RestoreRedundancyRequestControllerIntegrationTest {
+
+  @Autowired
+  private WebApplicationContext webApplicationContext;
+
+  // needs to be used together with any LocatorContextLoader
+  private LocatorWebContext context;
+
+  private ClusterManagementService client;
+  private static final String RESTORE_REDUNDANCY_URL = "/operations/restoreRedundancy";
+
+  @Before
+  public void before() {
+    context = new LocatorWebContext(webApplicationContext);
+
+    RestTemplate template = new RestTemplate();
+    template.setRequestFactory(context.getRequestFactory());
+    ClusterManagementServiceTransport transport =
+        new RestTemplateClusterManagementServiceTransport(template);
+    client = new ClusterManagementServiceBuilder().setTransport(transport).build();
+  }
+
+  @Test
+  public void start() throws Exception {
+    String json = "{}";
+    context.perform(post("/v1" + RESTORE_REDUNDANCY_URL).content(json))
+        .andExpect(status().isAccepted())
+        .andExpect(content().string(not(containsString("\"class\""))))
+        .andExpect(
+            jsonPath("$.links.self",
+                Matchers.containsString("/v1" + RESTORE_REDUNDANCY_URL)))
+        .andExpect(jsonPath("$.statusMessage", Matchers.containsString("Operation started")));
+  }
+
+  @Test
+  public void getStatus() throws Exception {
+    String json = "{}";
+    CompletableFuture<String> futureUri = new CompletableFuture<>();
+    context.perform(post("/v1" + RESTORE_REDUNDANCY_URL).content(json))
+        .andExpect(status().isAccepted())
+        .andExpect(new ResponseBodyMatchers().containsObjectAsJson(futureUri))
+        .andExpect(jsonPath("$.statusMessage", Matchers.containsString("Operation started")));
+    while (true) {
+      try {
+        ResultActions resultActions = context.perform(get(futureUri.get()));
+        resultActions
+            .andExpect(status().isOk())
+            .andExpect(jsonPath("$.statusCode", Matchers.is("IN_PROGRESS")));
+      } catch (AssertionError t) {
+        context.perform(get(futureUri.get()))
+            .andExpect(status().isOk())
+            .andExpect(jsonPath("$.operationResult.success", Matchers.is(true)));
+        return;
+      }
+    }
+  }
+
+  static class ResponseBodyMatchers {
+    ResultMatcher containsObjectAsJson(CompletableFuture<String> futureUri) {
+      return mvcResult -> {
+        String json = mvcResult.getResponse().getContentAsString();
+        String uri =
+            json.replaceFirst(".*\"self\":\"[^\"]*" + URI_VERSION, URI_VERSION).replaceFirst("\".*",
+                "");
+        futureUri.complete(uri);
+      };
+    }
+  }
+
+  @Test
+  public void checkStatusOperationDoesNotExist() throws Exception {
+    context.perform(get("/v1" + RESTORE_REDUNDANCY_URL + "/abc"))
+        .andExpect(status().isNotFound())
+        .andExpect(content().string(not(containsString("\"class\""))))
+        .andExpect(jsonPath("$.statusCode", Matchers.is("ENTITY_NOT_FOUND")))
+        .andExpect(
+            jsonPath("$.statusMessage",
+                Matchers.containsString("Operation 'abc' does not exist.")));
+  }
+
+  @Test
+  public void list() throws Exception {
+    String json = "{}";
+    context.perform(post("/v1" + RESTORE_REDUNDANCY_URL).content(json));
+    context.perform(get("/v1" + RESTORE_REDUNDANCY_URL))
+        .andExpect(status().isOk())
+        .andExpect(content().string(not(containsString("\"class\""))))
+        .andExpect(
+            jsonPath("$.result[0].statusCode", Matchers.isOneOf("IN_PROGRESS", "ERROR", "OK")))
+        .andExpect(
+            jsonPath("$.result[0].links.self", Matchers.containsString("restoreRedundancy/")))
+        .andExpect(jsonPath("$.statusCode", Matchers.is("OK")));
+  }
+
+  @Test
+  public void doOperation() throws Exception {
+    RestoreRedundancyRequest restoreRedundancyRequest =
+        new RestoreRedundancyRequest();
+    ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults> result =
+        client.start(restoreRedundancyRequest);
+    assertThat(result.isSuccessful()).isTrue();
+    assertThat(result.getStatusMessage())
+        .isEqualTo("Operation started.  Use the URI to check its status.");
+  }
+
+  @Test
+  public void doListOperations() {
+    client.start(new RestoreRedundancyRequest());
+    ClusterManagementListOperationsResult<RestoreRedundancyRequest, RestoreRedundancyResults> listResult =
+        client.list(new RestoreRedundancyRequest());
+    assertThat(listResult.getResult().size()).isGreaterThanOrEqualTo(1);
+    assertThat(listResult.getResult().get(0).getOperationStart()).isNotNull();
+    assertThat(listResult.getResult().get(0).getStatusCode().toString()).isIn("IN_PROGRESS",
+        "ERROR", "OK");
+  }
+}
diff --git a/geode-web-management/src/main/java/org/apache/geode/management/internal/rest/controllers/RestoreRedundancyOperationController.java b/geode-web-management/src/main/java/org/apache/geode/management/internal/rest/controllers/RestoreRedundancyOperationController.java
new file mode 100644
index 0000000..5f1ece4
--- /dev/null
+++ b/geode-web-management/src/main/java/org/apache/geode/management/internal/rest/controllers/RestoreRedundancyOperationController.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.geode.management.internal.rest.controllers;
+
+import static org.apache.geode.management.configuration.Links.URI_VERSION;
+import static org.apache.geode.management.operation.RestoreRedundancyRequest.RESTORE_REDUNDANCY_ENDPOINT;
+
+import java.util.Optional;
+
+import io.swagger.annotations.ApiOperation;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.ResponseEntity;
+import org.springframework.security.access.prepost.PreAuthorize;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+import org.apache.geode.internal.security.SecurityService;
+import org.apache.geode.management.api.ClusterManagementListOperationsResult;
+import org.apache.geode.management.api.ClusterManagementOperationResult;
+import org.apache.geode.management.operation.RestoreRedundancyRequest;
+import org.apache.geode.management.runtime.RestoreRedundancyResults;
+
+@RestController("restoreRedundancyOperation")
+@RequestMapping(URI_VERSION)
+public class RestoreRedundancyOperationController extends AbstractManagementController {
+  @ApiOperation(value = "start restore-redundancy")
+  @PreAuthorize("@securityService.authorize('DATA', 'MANAGE')")
+  @PostMapping(RESTORE_REDUNDANCY_ENDPOINT)
+  public ResponseEntity<ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults>> startRestoreRedundancy(
+      @RequestBody RestoreRedundancyRequest operation) {
+    operation.setOperator(
+        Optional.ofNullable(securityService).map(SecurityService::getSubject).map(Object::toString)
+            .orElse(null));
+    ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults> result =
+        clusterManagementService
+            .start(operation);
+    return new ResponseEntity<>(result, HttpStatus.ACCEPTED);
+  }
+
+  @ApiOperation(value = "list restore-redundancy")
+  @PreAuthorize("@securityService.authorize('DATA', 'MANAGE')")
+  @GetMapping(RESTORE_REDUNDANCY_ENDPOINT)
+  public ClusterManagementListOperationsResult<RestoreRedundancyRequest, RestoreRedundancyResults> listRestoreRedundancies() {
+    return clusterManagementService.list(new RestoreRedundancyRequest());
+  }
+
+  @ApiOperation(value = "get restore-redundancy")
+  @PreAuthorize("@securityService.authorize('DATA', 'MANAGE')")
+  @GetMapping(RESTORE_REDUNDANCY_ENDPOINT + "/{id:.+}")
+  public ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults> getRestoreRedundancy(
+      @PathVariable String id) {
+    ClusterManagementOperationResult<RestoreRedundancyRequest, RestoreRedundancyResults> result =
+        clusterManagementService.get(new RestoreRedundancyRequest(), id);
+    return result;
+  }
+}
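
Alongside the POST mapping, the controller above exposes GET endpoints to list restore-redundancy operations and to fetch a single operation by id. A client-side sketch of the listing call, modeled on the doListOperations integration test earlier in this patch; the client is assumed to be built as in the DUnit test:

    import org.apache.geode.management.api.ClusterManagementListOperationsResult;
    import org.apache.geode.management.api.ClusterManagementService;
    import org.apache.geode.management.operation.RestoreRedundancyRequest;
    import org.apache.geode.management.runtime.RestoreRedundancyResults;

    public class ListRestoreRedundancySketch {
      // The client is assumed to be an already-built ClusterManagementService (see the DUnit test above).
      static void listOperations(ClusterManagementService client) {
        ClusterManagementListOperationsResult<RestoreRedundancyRequest, RestoreRedundancyResults> list =
            client.list(new RestoreRedundancyRequest());
        // Each entry reports when the operation started and its current status
        // (IN_PROGRESS, ERROR, or OK), mirroring GET /v1/operations/restoreRedundancy.
        list.getResult().forEach(op ->
            System.out.println(op.getOperationStart() + " -> " + op.getStatusCode()));
      }
    }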