Posted to commits@hbase.apache.org by st...@apache.org on 2019/08/19 21:26:55 UTC

[hbase] branch branch-2.2 updated: HBASE-22882 TestFlushSnapshotFromClient#testConcurrentSnapshottingAttempts is flakey (was written flakey) Addendum; just remove the test altogether

This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
     new b34ed62   HBASE-22882 TestFlushSnapshotFromClient#testConcurrentSnapshottingAttempts is flakey (was written flakey)  Addendum; just remove the test altogether
b34ed62 is described below

commit b34ed62a2b312058bc94e0e1a65403360228291e
Author: stack <st...@apache.org>
AuthorDate: Mon Aug 19 14:25:21 2019 -0700

     HBASE-22882 TestFlushSnapshotFromClient#testConcurrentSnapshottingAttempts is flakey (was written flakey)
     Addendum; just remove the test altogether
    
     Signed-off-by:  Nick Dimiduk <nd...@apache.org>
---
 .../snapshot/TestFlushSnapshotFromClient.java      | 117 +--------------------
 1 file changed, 1 insertion(+), 116 deletions(-)

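For context on the failure mode the @Ignore comment of the removed test describes below (a single snapshot handler thread on the region-server side, so a second concurrent submission gets rejected with RejectedExecutionException), here is a minimal, self-contained Java sketch of that behavior. The executor setup and class name are illustrative assumptions for the sketch, not the actual region-server handler code.

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SingleHandlerRejectionSketch {
  public static void main(String[] args) throws Exception {
    // One worker thread and no request queue: stands in for a single
    // snapshot handler that cannot buffer a second concurrent request.
    ThreadPoolExecutor handler = new ThreadPoolExecutor(
        1, 1, 0L, TimeUnit.MILLISECONDS, new SynchronousQueue<>());

    // First "snapshot" occupies the only handler thread.
    handler.submit(() -> {
      try {
        Thread.sleep(2000);   // simulate a long-running snapshot
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });

    // A second submission while the first is still running is rejected
    // by the default AbortPolicy -- the exception the removed test hit.
    try {
      handler.submit(() -> System.out.println("second snapshot"));
    } catch (RejectedExecutionException e) {
      System.out.println("second request rejected: " + e);
    }

    handler.shutdown();
  }
}

With only one handler and no queue, whether the second request lands before or after the first finishes is a timing race, which is why the test below was flaky rather than deterministic.
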
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
index 73d68f2..efe7a0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.snapshot;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -27,7 +26,6 @@ import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.CountDownLatch;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -52,7 +50,6 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
@@ -403,118 +400,6 @@ public class TestFlushSnapshotFromClient {
       snapshotName, rootDir, fs, true);
   }
 
-  /**
-   * Demonstrate that we reject snapshot requests if there is a snapshot already running on the
-   * same table currently running and that concurrent snapshots on different tables can both
-   * succeed concurrently.
-   */
-  @Ignore // Turning this test off. It is written flakey (See original submission in HBASE-7536
-  // for admission). While the test ensures we run one snapshot at a time as the above comment
-  // describes, the second part of the comment where we are supposed to allow snapshots against
-  // different tables run concurrently is where there is issue. There is only one
-  // handler thread on the regionserver-side which means that on a second submission, the
-  // second request gets rejected with RejectedExecutionException. The test used to sort-of
-  // pass with 20 attempts but as often-as-not, fails.
-  @Test
-  public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
-    final TableName TABLE2_NAME = TableName.valueOf(TABLE_NAME + "2");
-
-    int ssNum = 2;
-    // make sure we don't fail on listing snapshots
-    SnapshotTestingUtils.assertNoSnapshots(admin);
-    // create second testing table
-    SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
-    // load the table so we have some data
-    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
-    SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
-
-    final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
-    // We'll have one of these per thread
-    class SSRunnable implements Runnable {
-      SnapshotDescription ss;
-      SSRunnable(SnapshotDescription ss) {
-        this.ss = ss;
-      }
-
-      @Override
-      public void run() {
-        try {
-          LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils
-              .toString(ProtobufUtil.createHBaseProtosSnapshotDesc(ss)));
-          admin.takeSnapshotAsync(ss);
-        } catch (Exception e) {
-          LOG.info("Exception during snapshot request: " + ClientSnapshotDescriptionUtils.toString(
-            ProtobufUtil.createHBaseProtosSnapshotDesc(ss))
-              + ".  This is ok, we expect some", e);
-        }
-        LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils
-            .toString(ProtobufUtil.createHBaseProtosSnapshotDesc(ss)));
-        toBeSubmitted.countDown();
-      }
-    };
-
-    // build descriptions
-    SnapshotDescription[] descs = new SnapshotDescription[ssNum];
-    for (int i = 0; i < ssNum; i++) {
-      if (i % 2 == 0) {
-        descs[i] = new SnapshotDescription("ss" + i, TABLE_NAME, SnapshotType.FLUSH);
-      } else {
-        descs[i] = new SnapshotDescription("ss" + i, TABLE2_NAME, SnapshotType.FLUSH);
-      }
-    }
-
-    // kick each off its own thread
-    for (int i=0 ; i < ssNum; i++) {
-      new Thread(new SSRunnable(descs[i])).start();
-    }
-
-    // wait until all have been submitted
-    toBeSubmitted.await();
-
-    // loop until all are done.
-    while (true) {
-      int doneCount = 0;
-      for (SnapshotDescription ss : descs) {
-        try {
-          if (admin.isSnapshotFinished(ss)) {
-            doneCount++;
-          }
-        } catch (Exception e) {
-          LOG.warn("Got an exception when checking for snapshot " + ss.getName(), e);
-          doneCount++;
-        }
-      }
-      if (doneCount == descs.length) {
-        break;
-      }
-      Thread.sleep(100);
-    }
-
-    // dump for debugging
-    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
-
-    List<SnapshotDescription> taken = admin.listSnapshots();
-    int takenSize = taken.size();
-    LOG.info("Taken " + takenSize + " snapshots:  " + taken);
-    assertTrue("We expect at least 1 request to be rejected because of we concurrently" +
-        " issued many requests", takenSize < ssNum && takenSize > 0);
-
-    // Verify that there's at least one snapshot per table
-    int t1SnapshotsCount = 0;
-    int t2SnapshotsCount = 0;
-    for (SnapshotDescription ss : taken) {
-      if (ss.getTableName().equals(TABLE_NAME)) {
-        t1SnapshotsCount++;
-      } else if (ss.getTableName().equals(TABLE2_NAME)) {
-        t2SnapshotsCount++;
-      }
-    }
-    assertTrue("We expect at least 1 snapshot of table1 ", t1SnapshotsCount > 0);
-    assertTrue("We expect at least 1 snapshot of table2 ", t2SnapshotsCount > 0);
-
-    UTIL.deleteTable(TABLE2_NAME);
-  }
-
   private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
       throws IOException, InterruptedException {
     // Verify that there's one region less