Posted to commits@flink.apache.org by dw...@apache.org on 2020/07/21 09:51:51 UTC

[flink] 02/02: Revert "[FLINK-18600] Temporary disable kerberized e2e yarn tests"

This is an automated email from the ASF dual-hosted git repository.

dwysakowicz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 1908b2ce6ffb8efc7d339136787494b4fe70846f
Author: Dawid Wysakowicz <dw...@apache.org>
AuthorDate: Thu Jul 16 13:57:14 2020 +0200

    Revert "[FLINK-18600] Temporary disable kerberized e2e yarn tests"
    
    This reverts commit 9036bc0fd8b24d0a270f964a8116f5db781e4b3c.
---
 flink-end-to-end-tests/run-nightly-tests.sh        | 11 +--
 .../test-scripts/test_pyflink.sh                   | 93 +++++++++++-----------
 2 files changed, 50 insertions(+), 54 deletions(-)

diff --git a/flink-end-to-end-tests/run-nightly-tests.sh b/flink-end-to-end-tests/run-nightly-tests.sh
index 2e56fa6..31cf2cd 100755
--- a/flink-end-to-end-tests/run-nightly-tests.sh
+++ b/flink-end-to-end-tests/run-nightly-tests.sh
@@ -132,13 +132,10 @@ if [[ ${PROFILE} != *"jdk11"* ]]; then
 
 	if [[ `uname -i` != 'aarch64' ]]; then
 		# Hadoop YARN doesn't support aarch64 at this moment. See: https://issues.apache.org/jira/browse/HADOOP-16723
-
-		# YARN tests disabled because we can no longer download oracle jdk. See FLINK-18600
-		#run_test "Running Kerberized YARN per-job on Docker test (default input)" "$END_TO_END_DIR/test-scripts/test_yarn_job_kerberos_docker.sh"
-		#run_test "Running Kerberized YARN per-job on Docker test (custom fs plugin)" "$END_TO_END_DIR/test-scripts/test_yarn_job_kerberos_docker.sh dummy-fs"
-		#run_test "Running Kerberized YARN application on Docker test (default input)" "$END_TO_END_DIR/test-scripts/test_yarn_application_kerberos_docker.sh"
-		#run_test "Running Kerberized YARN application on Docker test (custom fs plugin)" "$END_TO_END_DIR/test-scripts/test_yarn_application_kerberos_docker.sh dummy-fs"
-
+		run_test "Running Kerberized YARN per-job on Docker test (default input)" "$END_TO_END_DIR/test-scripts/test_yarn_job_kerberos_docker.sh"
+		run_test "Running Kerberized YARN per-job on Docker test (custom fs plugin)" "$END_TO_END_DIR/test-scripts/test_yarn_job_kerberos_docker.sh dummy-fs"
+		run_test "Running Kerberized YARN application on Docker test (default input)" "$END_TO_END_DIR/test-scripts/test_yarn_application_kerberos_docker.sh"
+		run_test "Running Kerberized YARN application on Docker test (custom fs plugin)" "$END_TO_END_DIR/test-scripts/test_yarn_application_kerberos_docker.sh dummy-fs"
 		run_test "Run Mesos WordCount test" "$END_TO_END_DIR/test-scripts/test_mesos_wordcount.sh"
 		run_test "Run Mesos multiple submission test" "$END_TO_END_DIR/test-scripts/test_mesos_multiple_submissions.sh"
 
diff --git a/flink-end-to-end-tests/test-scripts/test_pyflink.sh b/flink-end-to-end-tests/test-scripts/test_pyflink.sh
index 0cc0397..8877321 100755
--- a/flink-end-to-end-tests/test-scripts/test_pyflink.sh
+++ b/flink-end-to-end-tests/test-scripts/test_pyflink.sh
@@ -162,50 +162,49 @@ wait_job_terminal_state "$JOB_ID" "FINISHED"
 stop_cluster
 
 # These tests are known to fail on JDK11. See FLINK-13719
-# YARN tests disabled because we can no longer download oracle jdk. See FLINK-18600
-#if [[ ${PROFILE} != *"jdk11"* ]]; then
-#    cd "${CURRENT_DIR}/../"
-#    source "${CURRENT_DIR}"/common_yarn_docker.sh
-#    # test submitting on yarn
-#    start_hadoop_cluster_and_prepare_flink
-#
-#    # copy test files
-#    docker cp "${FLINK_PYTHON_DIR}/dev/lint-python.sh" master:/tmp/
-#    docker cp "${FLINK_PYTHON_TEST_DIR}/target/PythonUdfSqlJobExample.jar" master:/tmp/
-#    docker cp "${FLINK_PYTHON_TEST_DIR}/python/add_one.py" master:/tmp/
-#    docker cp "${REQUIREMENTS_PATH}" master:/tmp/
-#    docker cp "${FLINK_PYTHON_TEST_DIR}/python/python_job.py" master:/tmp/
-#    PYFLINK_PACKAGE_FILE=$(basename "${FLINK_PYTHON_DIR}"/dist/apache-flink-*.tar.gz)
-#    docker cp "${FLINK_PYTHON_DIR}/dist/${PYFLINK_PACKAGE_FILE}" master:/tmp/
-#
-#    # prepare environment
-#    docker exec master bash -c "
-#    /tmp/lint-python.sh -s miniconda
-#    source /tmp/.conda/bin/activate
-#    pip install /tmp/${PYFLINK_PACKAGE_FILE}
-#    conda install -y -q zip=3.0
-#    rm -rf /tmp/.conda/pkgs
-#    cd /tmp
-#    zip -q -r /tmp/venv.zip .conda
-#    echo \"taskmanager.memory.task.off-heap.size: 100m\" >> \"/home/hadoop-user/$FLINK_DIRNAME/conf/flink-conf.yaml\"
-#    "
-#
-#    docker exec master bash -c "export HADOOP_CLASSPATH=\`hadoop classpath\` && \
-#        export PYFLINK_CLIENT_EXECUTABLE=/tmp/.conda/bin/python && \
-#        /home/hadoop-user/$FLINK_DIRNAME/bin/flink run -m yarn-cluster -ytm 1500 -yjm 1000 \
-#        -pyfs /tmp/add_one.py \
-#        -pyreq /tmp/requirements.txt \
-#        -pyarch /tmp/venv.zip \
-#        -pyexec venv.zip/.conda/bin/python \
-#        /tmp/PythonUdfSqlJobExample.jar"
-#
-#    docker exec master bash -c "export HADOOP_CLASSPATH=\`hadoop classpath\` && \
-#        export PYFLINK_CLIENT_EXECUTABLE=/tmp/.conda/bin/python && \
-#        /home/hadoop-user/$FLINK_DIRNAME/bin/flink run -m yarn-cluster -ytm 1500 -yjm 1000 \
-#        -pyfs /tmp/add_one.py \
-#        -pyreq /tmp/requirements.txt \
-#        -pyarch /tmp/venv.zip \
-#        -pyexec venv.zip/.conda/bin/python \
-#        -py /tmp/python_job.py \
-#        pipeline.jars file:/tmp/PythonUdfSqlJobExample.jar"
-#fi
+if [[ ${PROFILE} != *"jdk11"* ]]; then
+    cd "${CURRENT_DIR}/../"
+    source "${CURRENT_DIR}"/common_yarn_docker.sh
+    # test submitting on yarn
+    start_hadoop_cluster_and_prepare_flink
+
+    # copy test files
+    docker cp "${FLINK_PYTHON_DIR}/dev/lint-python.sh" master:/tmp/
+    docker cp "${FLINK_PYTHON_TEST_DIR}/target/PythonUdfSqlJobExample.jar" master:/tmp/
+    docker cp "${FLINK_PYTHON_TEST_DIR}/python/add_one.py" master:/tmp/
+    docker cp "${REQUIREMENTS_PATH}" master:/tmp/
+    docker cp "${FLINK_PYTHON_TEST_DIR}/python/python_job.py" master:/tmp/
+    PYFLINK_PACKAGE_FILE=$(basename "${FLINK_PYTHON_DIR}"/dist/apache-flink-*.tar.gz)
+    docker cp "${FLINK_PYTHON_DIR}/dist/${PYFLINK_PACKAGE_FILE}" master:/tmp/
+
+    # prepare environment
+    docker exec master bash -c "
+    /tmp/lint-python.sh -s miniconda
+    source /tmp/.conda/bin/activate
+    pip install /tmp/${PYFLINK_PACKAGE_FILE}
+    conda install -y -q zip=3.0
+    rm -rf /tmp/.conda/pkgs
+    cd /tmp
+    zip -q -r /tmp/venv.zip .conda
+    echo \"taskmanager.memory.task.off-heap.size: 100m\" >> \"/home/hadoop-user/$FLINK_DIRNAME/conf/flink-conf.yaml\"
+    "
+
+    docker exec master bash -c "export HADOOP_CLASSPATH=\`hadoop classpath\` && \
+        export PYFLINK_CLIENT_EXECUTABLE=/tmp/.conda/bin/python && \
+        /home/hadoop-user/$FLINK_DIRNAME/bin/flink run -m yarn-cluster -ytm 1500 -yjm 1000 \
+        -pyfs /tmp/add_one.py \
+        -pyreq /tmp/requirements.txt \
+        -pyarch /tmp/venv.zip \
+        -pyexec venv.zip/.conda/bin/python \
+        /tmp/PythonUdfSqlJobExample.jar"
+
+    docker exec master bash -c "export HADOOP_CLASSPATH=\`hadoop classpath\` && \
+        export PYFLINK_CLIENT_EXECUTABLE=/tmp/.conda/bin/python && \
+        /home/hadoop-user/$FLINK_DIRNAME/bin/flink run -m yarn-cluster -ytm 1500 -yjm 1000 \
+        -pyfs /tmp/add_one.py \
+        -pyreq /tmp/requirements.txt \
+        -pyarch /tmp/venv.zip \
+        -pyexec venv.zip/.conda/bin/python \
+        -py /tmp/python_job.py \
+        pipeline.jars file:/tmp/PythonUdfSqlJobExample.jar"
+fi
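
For reference, the re-enabled tests are driven by run_test in run-nightly-tests.sh, which takes a description and the command to execute. Below is a rough sketch (not part of the commit) of how the Kerberized YARN e2e scripts could be invoked directly from a Flink checkout; it assumes the scripts can run standalone, that END_TO_END_DIR points at flink-end-to-end-tests, and that a local Docker daemon is available. The optional dummy-fs argument selects the custom filesystem plugin input, exactly as in the nightly runner above.

    # Sketch only: paths and arguments taken from run-nightly-tests.sh above.
    # Assumes a Flink source checkout as the working directory and Docker running.
    export END_TO_END_DIR="$(pwd)/flink-end-to-end-tests"

    # Kerberized YARN per-job mode, default input
    "$END_TO_END_DIR/test-scripts/test_yarn_job_kerberos_docker.sh"

    # Kerberized YARN application mode with the dummy-fs custom filesystem plugin
    "$END_TO_END_DIR/test-scripts/test_yarn_application_kerberos_docker.sh" dummy-fs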