Posted to commits@zeppelin.apache.org by zj...@apache.org on 2022/03/16 14:15:45 UTC

[zeppelin] branch master updated: [ZEPPELIN-5671] Split spark-integration-test to hadoop2 and hadoop3 (#4311)

This is an automated email from the ASF dual-hosted git repository.

zjffdu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/zeppelin.git


The following commit(s) were added to refs/heads/master by this push:
     new d6da9af  [ZEPPELIN-5671] Split spark-integration-test to hadoop2 and hadoop3 (#4311)
d6da9af is described below

commit d6da9af4abbaeb71d93b5273df423af808a035f1
Author: Jeff Zhang <zj...@apache.org>
AuthorDate: Wed Mar 16 22:15:31 2022 +0800

    [ZEPPELIN-5671] Split spark-integration-test to hadoop2 and hadoop3 (#4311)
---
 .github/workflows/core.yml | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/core.yml b/.github/workflows/core.yml
index e765a54..f9eb3ce 100644
--- a/.github/workflows/core.yml
+++ b/.github/workflows/core.yml
@@ -254,6 +254,10 @@ jobs:
 
   spark-integration-test:
     runs-on: ubuntu-20.04
+    strategy:
+      fail-fast: false
+      matrix:
+        hadoop: [ 2, 3 ]
     steps:
       - name: Checkout
         uses: actions/checkout@v2
@@ -291,12 +295,8 @@ jobs:
       - name: Make IRkernel available to Jupyter
         run: |
           R -e "IRkernel::installspec()"
-      - name: run tests on hadoop2
-        run: ./mvnw test -DskipRat -pl zeppelin-interpreter-integration -Phadoop2 -Pintegration -B -Dtest=SparkSubmitIntegrationTest,ZeppelinSparkClusterTest24,SparkIntegrationTest24,ZeppelinSparkClusterTest30,SparkIntegrationTest30,ZeppelinSparkClusterTest31,SparkIntegrationTest31,ZeppelinSparkClusterTest32,SparkIntegrationTest32 -DfailIfNoTests=false
-      - name: run tests on hadoop3
-        run: |
-          rm -rf spark/interpreter/metastore_db
-          ./mvnw test -DskipRat -pl zeppelin-interpreter-integration -Phadoop3 -Pintegration -B -Dtest=SparkSubmitIntegrationTest,ZeppelinSparkClusterTest24,SparkIntegrationTest24,ZeppelinSparkClusterTest30,SparkIntegrationTest30,ZeppelinSparkClusterTest31,SparkIntegrationTest31,ZeppelinSparkClusterTest32,SparkIntegrationTest32 -DfailIfNoTests=false
+      - name: run tests on hadoop${{ matrix.hadoop }}
+        run: ./mvnw test -DskipRat -pl zeppelin-interpreter-integration -Phadoop${{ matrix.hadoop }} -Pintegration -B -Dtest=SparkSubmitIntegrationTest,ZeppelinSparkClusterTest24,SparkIntegrationTest24,ZeppelinSparkClusterTest30,SparkIntegrationTest30,ZeppelinSparkClusterTest31,SparkIntegrationTest31,ZeppelinSparkClusterTest32,SparkIntegrationTest32 -DfailIfNoTests=false
 
   # test on spark for each spark version & scala version
   spark-test:
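
For reference, a minimal sketch (abbreviated, most setup steps elided) of what the spark-integration-test job looks like after this change. The strategy.matrix entry makes GitHub Actions run the job once per listed Hadoop major version, and ${{ matrix.hadoop }} substitutes the matching Maven profile into the single test step:

    spark-integration-test:
      runs-on: ubuntu-20.04
      strategy:
        fail-fast: false          # if one Hadoop version fails, still run the other
        matrix:
          hadoop: [ 2, 3 ]        # expands to two parallel job runs: hadoop=2 and hadoop=3
      steps:
        - name: Checkout
          uses: actions/checkout@v2
        # ... environment setup steps unchanged from the workflow above ...
        - name: run tests on hadoop${{ matrix.hadoop }}
          # -Phadoop2 or -Phadoop3 is selected by the matrix value
          run: ./mvnw test -DskipRat -pl zeppelin-interpreter-integration -Phadoop${{ matrix.hadoop }} -Pintegration -B -Dtest=SparkSubmitIntegrationTest,ZeppelinSparkClusterTest24,SparkIntegrationTest24,ZeppelinSparkClusterTest30,SparkIntegrationTest30,ZeppelinSparkClusterTest31,SparkIntegrationTest31,ZeppelinSparkClusterTest32,SparkIntegrationTest32 -DfailIfNoTests=false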