Posted to commits@nifi.apache.org by ab...@apache.org on 2021/03/08 13:13:05 UTC

[nifi-minifi-cpp] 01/02: MINIFICPP-1513 - Increase timeouts for behave tests involving http proxies, clean up logging

This is an automated email from the ASF dual-hosted git repository.

aboda pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git

commit 63860782fdbb27def1d2d7fda8bd85ac91bbb74b
Author: Adam Hunyadi <hu...@gmail.com>
AuthorDate: Fri Feb 26 15:34:49 2021 +0100

    MINIFICPP-1513 - Increase timeouts for behave tests involving http proxies, clean up logging
    
    Signed-off-by: Arpad Boda <ab...@apache.org>
    
    This closes #1018
---
 docker/DockerVerify.sh                             |  2 +-
 .../integration/MiNiFi_integration_test_driver.py  |  1 -
 docker/test/integration/environment.py             |  1 -
 docker/test/integration/features/http.feature      |  2 +-
 docker/test/integration/features/s3.feature        |  6 +++---
 .../integration/minifi/core/DockerTestCluster.py   | 23 +++++++++++++++-------
 .../minifi/core/DockerTestDirectoryBindings.py     |  1 -
 7 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/docker/DockerVerify.sh b/docker/DockerVerify.sh
index dc897c2..afb4077 100755
--- a/docker/DockerVerify.sh
+++ b/docker/DockerVerify.sh
@@ -72,7 +72,7 @@ export PATH
 PYTHONPATH="${PYTHONPATH}:${docker_dir}/test/integration"
 export PYTHONPATH
 
-BEHAVE_OPTS="-f pretty --logging-level INFO --no-capture"
+BEHAVE_OPTS="-f pretty --logging-level INFO --logging-clear-handlers"
 
 cd "${docker_dir}/test/integration"
 exec 
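
For context, the updated BEHAVE_OPTS keep behave's output capture enabled and instead clear any pre-existing logging handlers before scenarios run. A rough Python equivalent of the invocation the script performs (a sketch only, assuming behave is installed and run from docker/test/integration; the feature path below is just an example):

    import subprocess

    BEHAVE_OPTS = ["-f", "pretty", "--logging-level", "INFO", "--logging-clear-handlers"]

    # Run a single feature file with the same options DockerVerify.sh exports.
    subprocess.run(
        ["behave", *BEHAVE_OPTS, "features/http.feature"],
        cwd="docker/test/integration",
        check=True,
    )
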
diff --git a/docker/test/integration/MiNiFi_integration_test_driver.py b/docker/test/integration/MiNiFi_integration_test_driver.py
index a1c63a0..ca5f04a 100644
--- a/docker/test/integration/MiNiFi_integration_test_driver.py
+++ b/docker/test/integration/MiNiFi_integration_test_driver.py
@@ -22,7 +22,6 @@ from minifi.validators.SingleFileOutputValidator import SingleFileOutputValidato
 
 class MiNiFi_integration_test():
     def __init__(self, context):
-        logging.info("MiNiFi_integration_test init")
         self.test_id = str(uuid.uuid4())
         self.clusters = {}
 
diff --git a/docker/test/integration/environment.py b/docker/test/integration/environment.py
index 68ddd42..4a3c12f 100644
--- a/docker/test/integration/environment.py
+++ b/docker/test/integration/environment.py
@@ -11,7 +11,6 @@ def raise_exception(exception):
 
 @fixture
 def test_driver_fixture(context):
-    logging.info("Integration test setup")
     context.test = MiNiFi_integration_test(context)
     yield context.test
     logging.info("Integration test teardown...")
diff --git a/docker/test/integration/features/http.feature b/docker/test/integration/features/http.feature
index 0652fd9..1f5e89d 100644
--- a/docker/test/integration/features/http.feature
+++ b/docker/test/integration/features/http.feature
@@ -40,7 +40,7 @@ Feature: Sending data using InvokeHTTP to a receiver using ListenHTTP
     And the "success" relationship of the ListenHTTP processor is connected to the PutFile
 
     When all instances start up
-    Then a flowfile with the content "test" is placed in the monitored directory in less than 60 seconds
+    Then a flowfile with the content "test" is placed in the monitored directory in less than 120 seconds
     And no errors were generated on the "http-proxy" regarding "http://minifi-listen:8080/contentListener"
 
   Scenario: A MiNiFi instance and transfers hashed data to another MiNiFi instance
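
A step phrased as "placed in the monitored directory in less than N seconds" implies waiting for the expected flowfile content up to a deadline, so raising the limit from 60 to 120 seconds simply gives the proxied InvokeHTTP -> ListenHTTP flow more headroom. A rough sketch of such a timeout-bounded check; the helper name wait_for_file_with_content is hypothetical, not the framework's actual step implementation:

    import os
    import time


    def wait_for_file_with_content(directory, expected_content, timeout_seconds):
        # Poll the monitored directory until a file with the expected content
        # appears or the deadline passes.
        deadline = time.monotonic() + timeout_seconds
        while time.monotonic() < deadline:
            for name in os.listdir(directory):
                path = os.path.join(directory, name)
                if not os.path.isfile(path):
                    continue
                with open(path, "r") as output_file:
                    if output_file.read() == expected_content:
                        return True
            time.sleep(1)
        return False
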
diff --git a/docker/test/integration/features/s3.feature b/docker/test/integration/features/s3.feature
index 897bf18..03f0e11 100644
--- a/docker/test/integration/features/s3.feature
+++ b/docker/test/integration/features/s3.feature
@@ -40,7 +40,7 @@ Feature: Sending data from MiNiFi-C++ to an AWS server
     And the http proxy server "http-proxy" is set up 
     When all instances start up
 
-    Then a flowfile with the content "test" is placed in the monitored directory in less than 90 seconds
+    Then a flowfile with the content "test" is placed in the monitored directory in less than 150 seconds
     And the object on the "s3" s3 server is "LH_O#L|FD<FASD{FO#@$#$%^ \"#\"$L%:\"@#$L\":test_data#$#%#$%?{\"F{"
     And the object content type on the "s3" s3 server is "application/octet-stream" and the object metadata matches use metadata
     And no errors were generated on the "http-proxy" regarding "http://s3-server:9090/test_bucket/test_object_key"
@@ -102,7 +102,7 @@ Feature: Sending data from MiNiFi-C++ to an AWS server
 
     When all instances start up
 
-    Then a flowfile with the content "test" is placed in the monitored directory in less than 120 seconds
+    Then a flowfile with the content "test" is placed in the monitored directory in less than 150 seconds
     And the object bucket on the "s3" s3 server is empty
     And no errors were generated on the "http-proxy" regarding "http://s3-server:9090/test_bucket/test_object_key"
 
@@ -151,5 +151,5 @@ Feature: Sending data from MiNiFi-C++ to an AWS server
 
     When all instances start up
 
-    Then a flowfile with the content "test" is placed in the monitored directory in less than 120 seconds
+    Then a flowfile with the content "test" is placed in the monitored directory in less than 150 seconds
     And no errors were generated on the "http-proxy" regarding "http://s3-server:9090/test_bucket/test_object_key"
diff --git a/docker/test/integration/minifi/core/DockerTestCluster.py b/docker/test/integration/minifi/core/DockerTestCluster.py
index 772558a..2b0f13e 100644
--- a/docker/test/integration/minifi/core/DockerTestCluster.py
+++ b/docker/test/integration/minifi/core/DockerTestCluster.py
@@ -41,6 +41,15 @@ class DockerTestCluster(SingleNodeDockerCluster):
             return True
         return False
 
+    @staticmethod
+    def get_stdout_encoding():
+        # Fall back to UTF-8 when sys.stdout's encoding is missing or set to None
+        # (explicitly piped output, and also some CI runners such as GitHub Actions).
+        encoding = getattr(sys.stdout, "encoding", None)
+        if encoding is None:
+            encoding = "utf8"
+        return encoding
+
     def get_app_log(self):
         for container in self.containers.values():
             container = self.client.containers.get(container.id)
@@ -91,7 +100,7 @@ class DockerTestCluster(SingleNodeDockerCluster):
                 raise Exception("Container failed to start up.")
 
     def check_http_proxy_access(self, url):
-        output = subprocess.check_output(["docker", "exec", "http-proxy", "cat", "/var/log/squid/access.log"]).decode(sys.stdout.encoding)
+        output = subprocess.check_output(["docker", "exec", "http-proxy", "cat", "/var/log/squid/access.log"]).decode(self.get_stdout_encoding())
         return url in output and \
             ((output.count("TCP_DENIED/407") != 0 and \
               output.count("TCP_MISS/200") == output.count("TCP_DENIED/407")) or \
@@ -99,21 +108,21 @@ class DockerTestCluster(SingleNodeDockerCluster):
 
     @retry_check()
     def check_s3_server_object_data(self, test_data):
-        s3_mock_dir = subprocess.check_output(["docker", "exec", "s3-server", "find", "/tmp/", "-type", "d", "-name", "s3mock*"]).decode(sys.stdout.encoding).strip()
-        file_data = subprocess.check_output(["docker", "exec", "s3-server", "cat", s3_mock_dir + "/test_bucket/test_object_key/fileData"]).decode(sys.stdout.encoding)
+        s3_mock_dir = subprocess.check_output(["docker", "exec", "s3-server", "find", "/tmp/", "-type", "d", "-name", "s3mock*"]).decode(self.get_stdout_encoding()).strip()
+        file_data = subprocess.check_output(["docker", "exec", "s3-server", "cat", s3_mock_dir + "/test_bucket/test_object_key/fileData"]).decode(self.get_stdout_encoding())
         return file_data == test_data
 
     @retry_check()
     def check_s3_server_object_metadata(self, content_type="application/octet-stream", metadata=dict()):
-        s3_mock_dir = subprocess.check_output(["docker", "exec", "s3-server", "find", "/tmp/", "-type", "d", "-name", "s3mock*"]).decode(sys.stdout.encoding).strip()
-        metadata_json = subprocess.check_output(["docker", "exec", "s3-server", "cat", s3_mock_dir + "/test_bucket/test_object_key/metadata"]).decode(sys.stdout.encoding)
+        s3_mock_dir = subprocess.check_output(["docker", "exec", "s3-server", "find", "/tmp/", "-type", "d", "-name", "s3mock*"]).decode(self.get_stdout_encoding()).strip()
+        metadata_json = subprocess.check_output(["docker", "exec", "s3-server", "cat", s3_mock_dir + "/test_bucket/test_object_key/metadata"]).decode(self.get_stdout_encoding())
         server_metadata = json.loads(metadata_json)
         return server_metadata["contentType"] == content_type and metadata == server_metadata["userMetadata"]
 
     @retry_check()
     def is_s3_bucket_empty(self):
-        s3_mock_dir = subprocess.check_output(["docker", "exec", "s3-server", "find", "/tmp/", "-type", "d", "-name", "s3mock*"]).decode(sys.stdout.encoding).strip()
-        ls_result = subprocess.check_output(["docker", "exec", "s3-server", "ls", s3_mock_dir + "/test_bucket/"]).decode(sys.stdout.encoding)
+        s3_mock_dir = subprocess.check_output(["docker", "exec", "s3-server", "find", "/tmp/", "-type", "d", "-name", "s3mock*"]).decode(self.get_stdout_encoding()).strip()
+        ls_result = subprocess.check_output(["docker", "exec", "s3-server", "ls", s3_mock_dir + "/test_bucket/"]).decode(self.get_stdout_encoding())
         return not ls_result
 
     def wait_for_container_logs(self, container_name, log, timeout, count=1):
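
The new get_stdout_encoding helper avoids passing None to bytes.decode: sys.stdout.encoding can be missing or None when output is piped or on some CI runners, and decode(None) raises a TypeError. A self-contained sketch of the same fallback, with a plain echo standing in for the docker exec calls above:

    import subprocess
    import sys


    def get_stdout_encoding():
        # sys.stdout.encoding may be missing or None (piped output, some CI runners);
        # fall back to UTF-8 so decode() never receives None.
        encoding = getattr(sys.stdout, "encoding", None)
        return encoding if encoding is not None else "utf8"


    # "echo" stands in for the "docker exec ..." commands in DockerTestCluster.
    raw_output = subprocess.check_output(["echo", "squid access.log contents"])
    print(raw_output.decode(get_stdout_encoding()))
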
diff --git a/docker/test/integration/minifi/core/DockerTestDirectoryBindings.py b/docker/test/integration/minifi/core/DockerTestDirectoryBindings.py
index d9b6a2b..28f9f77 100644
--- a/docker/test/integration/minifi/core/DockerTestDirectoryBindings.py
+++ b/docker/test/integration/minifi/core/DockerTestDirectoryBindings.py
@@ -55,7 +55,6 @@ class DockerTestDirectoryBindings:
 
     @staticmethod
     def create_directory(dir):
-        logging.info("Creating tmp dir: %s", dir)
         os.makedirs(dir)
         os.chmod(dir, 0o777)