Posted to notifications@accumulo.apache.org by GitBox <gi...@apache.org> on 2022/09/13 18:47:29 UTC

[GitHub] [accumulo] dlmarion commented on a diff in pull request #2928: LargeSplitRowIT.automaticSplitLater - increase timeout and other improvements

dlmarion commented on code in PR #2928:
URL: https://github.com/apache/accumulo/pull/2928#discussion_r969971897


##########
test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java:
##########
@@ -130,38 +129,35 @@ public void automaticSplitWith250Same() throws Exception {
     // make a table and lower the configured properties
     final String tableName = getUniqueNames(1)[0];
     try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
-      client.tableOperations().create(tableName);
-      client.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(),
-          "10K");
-      client.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(),
-          "none");
-      client.tableOperations().setProperty(tableName,
-          Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
-      client.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(),
-          "1000");
-
-      // Create a BatchWriter and key for a table entry that is longer than the allowed size for an
+      Map<String,String> props = new HashMap<>();

Review Comment:
   Would `Map.of()` work here? You use it above.
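   Something along these lines might work, assuming the four properties from the removed `setProperty()` calls are the only entries and the map is only read afterwards (the rest of this hunk isn't quoted, so how `props` gets applied is an assumption):

   ```java
   // Hedged sketch of the Map.of() suggestion; keys and values are copied from
   // the removed setProperty() calls above. Map.of() returns an immutable map,
   // so this only works if nothing needs to add entries to props later.
   Map<String,String> props = Map.of(
       Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K",
       Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none",
       Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64",
       Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
   ```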



##########
test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java:
##########
@@ -205,104 +201,92 @@ public void automaticSplitWithoutGaps() throws Exception {
   }
 
   @Test
-  @Timeout(60)
+  @Timeout(120)
   public void automaticSplitLater() throws Exception {
     log.info("Split later");
     try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
-      automaticSplit(client, 15, 1);
-
-      String tableName = new String();
-
-      for (String curr : client.tableOperations().list()) {
-        if (!curr.startsWith(Namespace.ACCUMULO.name() + ".")) {
-          tableName = curr;
-        }
-      }
-
-      // Create a BatchWriter and key for a table entry that is longer than the allowed size for an
-      // end row
-      BatchWriter batchWriter = client.createBatchWriter(tableName);
-      byte[] data = new byte[10];
-
-      // Fill key with all j's except for last spot which alternates through 1 through 10 for every
-      // j
-      // value
-      for (int j = 15; j < 150; j += 1) {
-        for (int i = 0; i < data.length - 1; i++) {
-          data[i] = (byte) j;
-        }
-
-        for (int i = 0; i < 25; i++) {
-          data[data.length - 1] = (byte) i;
-          Mutation m = new Mutation(data);
-          m.put("cf", "cq", "value");
-          batchWriter.addMutation(m);
+      final int max = 15;
+      automaticSplit(client, max, 1);
+
+      Predicate<String> isNotNamespaceTable =
+          table -> !table.startsWith(Namespace.ACCUMULO.name() + ".");
+      String tableName = client.tableOperations().list().stream().filter(isNotNamespaceTable)
+          .findAny().orElseGet(() -> fail("couldn't find a table"));
+
+      try (BatchWriter batchWriter = client.createBatchWriter(tableName)) {
+        byte[] data = new byte[10];
+        for (int j = max; j < 150; j++) {
+          // Fill key with all j's except for the last index
+          Arrays.fill(data, 0, data.length - 1, (byte) j);
+
+          // for each j, make the last index of key 0 through 24 then add the mutation
+          for (int i = 0; i < 25; i++) {
+            data[data.length - 1] = (byte) i;
+            Mutation m = new Mutation(data);
+            m.put("cf", "cq", "value");
+            batchWriter.addMutation(m);
+          }
         }
       }
-      // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time
-      // for the table to split if need be.
-      batchWriter.close();
+      // Flush the BatchWriter and table then wait for splits to be present
       client.tableOperations().flush(tableName, new Text(), new Text("z"), true);
 
       // Make sure a split occurs
-      while (client.tableOperations().listSplits(tableName).isEmpty()) {
-        Thread.sleep(250);
-      }
+      Wait.Condition splitsToBePresent =
+          () -> client.tableOperations().listSplits(tableName).stream().findAny().isPresent();
+      Wait.waitFor(splitsToBePresent, SECONDS.toMillis(60L), 250L);
 
-      assertTrue(!client.tableOperations().listSplits(tableName).isEmpty());
+      assertTrue(client.tableOperations().listSplits(tableName).stream().findAny().isPresent());
     }
   }
 
   private void automaticSplit(AccumuloClient client, int max, int spacing) throws Exception {
     // make a table and lower the configured properties
+    Map<String,String> props = new HashMap<>();

Review Comment:
   Would `Map.of()` work here as well?
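   The same substitution could apply here; a hypothetical sketch, since the entries this helper puts into `props` and how it applies them are not visible in this hunk:

   ```java
   // Hypothetical sketch only: the actual properties automaticSplit() sets are
   // not shown in the quoted diff, so this entry is a placeholder. The point is
   // that an immutable Map.of() is enough when the map is only read afterwards,
   // e.g. handed to a NewTableConfiguration at table creation time.
   Map<String,String> props = Map.of(Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
   client.tableOperations().create(tableName,
       new NewTableConfiguration().setProperties(props));
   ```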



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@accumulo.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org