Posted to commits@iceberg.apache.org by bl...@apache.org on 2019/01/07 22:04:58 UTC

[incubator-iceberg] branch master updated: Use a separate executor service for the concurrent Hive test (#67)

This is an automated email from the ASF dual-hosted git repository.

blue pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new 1ca5599  Use a separate executor service for the concurrent Hive test (#67)
1ca5599 is described below

commit 1ca5599bf841d86717a8cfbad6c7f3e0e8be48f2
Author: Anton Okolnychyi <ao...@apple.com>
AuthorDate: Tue Jan 8 00:04:54 2019 +0200

    Use a separate executor service for the concurrent Hive test (#67)
    
    This fixes CI build errors for master.
---
 .../src/test/java/com/netflix/iceberg/hive/HiveTablesTest.java | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/hive/src/test/java/com/netflix/iceberg/hive/HiveTablesTest.java b/hive/src/test/java/com/netflix/iceberg/hive/HiveTablesTest.java
index 6226a43..91e0a73 100644
--- a/hive/src/test/java/com/netflix/iceberg/hive/HiveTablesTest.java
+++ b/hive/src/test/java/com/netflix/iceberg/hive/HiveTablesTest.java
@@ -15,6 +15,7 @@
  */
 package com.netflix.iceberg.hive;
 
+import com.google.common.util.concurrent.MoreExecutors;
 import com.netflix.iceberg.DataFile;
 import com.netflix.iceberg.DataFiles;
 import com.netflix.iceberg.FileFormat;
@@ -29,13 +30,15 @@ import org.junit.Test;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import static com.netflix.iceberg.BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE;
 import static com.netflix.iceberg.BaseMetastoreTableOperations.METADATA_LOCATION_PROP;
 import static com.netflix.iceberg.BaseMetastoreTableOperations.TABLE_TYPE_PROP;
-import static com.netflix.iceberg.util.ThreadPools.getWorkerPool;
 
 public class HiveTablesTest extends HiveTableBaseTest {
   @Test
@@ -108,9 +111,12 @@ public class HiveTablesTest extends HiveTableBaseTest {
       .withFileSizeInBytes(0)
       .build();
 
+    ExecutorService executorService = MoreExecutors.getExitingExecutorService(
+      (ThreadPoolExecutor) Executors.newFixedThreadPool(2));
+
     Tasks.foreach(icebergTable, anotherIcebergTable)
       .stopOnFailure().throwFailureWhenFinished()
-      .executeWith(getWorkerPool())
+      .executeWith(executorService)
       .run(table -> {
         for (int numCommittedFiles = 0; numCommittedFiles < 10; numCommittedFiles++) {
           long commitStartTime = System.currentTimeMillis();