Posted to commits@arrow.apache.org by we...@apache.org on 2018/08/06 18:41:55 UTC

[arrow] branch master updated: ARROW-2061: [C++] Make tests a bit faster with Valgrind

This is an automated email from the ASF dual-hosted git repository.

wesm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/master by this push:
     new 0b654ce  ARROW-2061: [C++] Make tests a bit faster with Valgrind
0b654ce is described below

commit 0b654ce8087d685b05f3d28b7c0ae9768946acbc
Author: Antoine Pitrou <an...@python.org>
AuthorDate: Mon Aug 6 14:41:50 2018 -0400

    ARROW-2061: [C++] Make tests a bit faster with Valgrind
    
    Saves around 80 seconds on Travis-CI.
    
    Author: Antoine Pitrou <an...@python.org>
    
    Closes #2377 from pitrou/ARROW-2061-valgrind-test-speed and squashes the following commits:
    
    43a1e0e1 <Antoine Pitrou> ARROW-2061:  Make tests a bit faster with Valgrind
---
 cpp/src/arrow/array-test.cc              |  5 +++-
 cpp/src/arrow/compute/compute-test.cc    | 51 +++++++++++++++++++-------------
 cpp/src/arrow/io/io-memory-test.cc       | 11 +++++--
 cpp/src/arrow/ipc/ipc-read-write-test.cc |  3 ++
 4 files changed, 47 insertions(+), 23 deletions(-)
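
The changes below all apply the same compile-time gating: when the test binary is built for a Valgrind run (the Arrow build is assumed to define ARROW_VALGRIND in that configuration), workload sizes are scaled down so the instrumented run stays tractable. A minimal standalone sketch of that pattern, with a hypothetical kNumValues constant standing in for the various test sizes:

    // Sketch only; assumes the build defines ARROW_VALGRIND for Valgrind runs.
    #include <cstdint>
    #include <iostream>

    #ifdef NDEBUG
    // Optimized build: full-size workload.
    constexpr int64_t kNumValues = int64_t(1) << 25;
    #elif !defined(ARROW_VALGRIND)
    // Debug build: smaller, since unoptimized code is already slow.
    constexpr int64_t kNumValues = int64_t(2) << 24;
    #else
    // Valgrind multiplies the slowdown again, so shrink further.
    constexpr int64_t kNumValues = int64_t(2) << 20;
    #endif

    int main() {
      std::cout << "test workload size: " << kNumValues << std::endl;
      return 0;
    }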

diff --git a/cpp/src/arrow/array-test.cc b/cpp/src/arrow/array-test.cc
index b7bad67..8b78762 100644
--- a/cpp/src/arrow/array-test.cc
+++ b/cpp/src/arrow/array-test.cc
@@ -247,10 +247,13 @@ TEST_F(TestArray, TestIsNullIsValidNoNulls) {
 TEST_F(TestArray, BuildLargeInMemoryArray) {
 #ifdef NDEBUG
   const int64_t length = static_cast<int64_t>(std::numeric_limits<int32_t>::max()) + 1;
-#else
+#elif !defined(ARROW_VALGRIND)
   // use a smaller size since the insert function isn't optimized properly on debug and
   // the test takes a long time to complete
   const int64_t length = 2 << 24;
+#else
+  // use an even smaller size with valgrind
+  const int64_t length = 2 << 20;
 #endif
 
   BooleanBuilder builder;
diff --git a/cpp/src/arrow/compute/compute-test.cc b/cpp/src/arrow/compute/compute-test.cc
index 6a92844..ba5c935 100644
--- a/cpp/src/arrow/compute/compute-test.cc
+++ b/cpp/src/arrow/compute/compute-test.cc
@@ -1034,24 +1034,29 @@ TEST_F(TestHashKernel, DictEncodeBinary) {
 }
 
 TEST_F(TestHashKernel, BinaryResizeTable) {
-  const int64_t kTotalValues = 10000;
-  const int64_t kRepeats = 10;
+  const int32_t kTotalValues = 10000;
+#if !defined(ARROW_VALGRIND)
+  const int32_t kRepeats = 10;
+#else
+  // Mitigate Valgrind's slowness
+  const int32_t kRepeats = 3;
+#endif
 
   vector<std::string> values;
   vector<std::string> uniques;
   vector<int32_t> indices;
-  for (int64_t i = 0; i < kTotalValues * kRepeats; i++) {
-    int64_t index = i % kTotalValues;
-    std::stringstream ss;
-    ss << "test" << index;
-    std::string val = ss.str();
+  char buf[20] = "test";
 
-    values.push_back(val);
+  for (int32_t i = 0; i < kTotalValues * kRepeats; i++) {
+    int32_t index = i % kTotalValues;
+
+    ASSERT_GE(snprintf(buf + 4, sizeof(buf) - 4, "%d", index), 0);
+    values.emplace_back(buf);
 
     if (i < kTotalValues) {
-      uniques.push_back(val);
+      uniques.push_back(values.back());
     }
-    indices.push_back(static_cast<int32_t>(i % kTotalValues));
+    indices.push_back(index);
   }
 
   CheckUnique<BinaryType, std::string>(&this->ctx_, binary(), values, {}, uniques, {});
@@ -1076,24 +1081,30 @@ TEST_F(TestHashKernel, DictEncodeFixedSizeBinary) {
 }
 
 TEST_F(TestHashKernel, FixedSizeBinaryResizeTable) {
-  const int64_t kTotalValues = 10000;
-  const int64_t kRepeats = 10;
+  const int32_t kTotalValues = 10000;
+#if !defined(ARROW_VALGRIND)
+  const int32_t kRepeats = 10;
+#else
+  // Mitigate Valgrind's slowness
+  const int32_t kRepeats = 3;
+#endif
 
   vector<std::string> values;
   vector<std::string> uniques;
   vector<int32_t> indices;
-  for (int64_t i = 0; i < kTotalValues * kRepeats; i++) {
-    int64_t index = i % kTotalValues;
-    std::stringstream ss;
-    ss << "test" << static_cast<char>(index / 128) << static_cast<char>(index % 128);
-    std::string val = ss.str();
+  char buf[7] = "test..";
 
-    values.push_back(val);
+  for (int32_t i = 0; i < kTotalValues * kRepeats; i++) {
+    int32_t index = i % kTotalValues;
+
+    buf[4] = static_cast<char>(index / 128);
+    buf[5] = static_cast<char>(index % 128);
+    values.emplace_back(buf, 6);
 
     if (i < kTotalValues) {
-      uniques.push_back(val);
+      uniques.push_back(values.back());
     }
-    indices.push_back(static_cast<int32_t>(i % kTotalValues));
+    indices.push_back(index);
   }
 
   auto type = fixed_size_binary(6);
diff --git a/cpp/src/arrow/io/io-memory-test.cc b/cpp/src/arrow/io/io-memory-test.cc
index d80aaec..62305a6 100644
--- a/cpp/src/arrow/io/io-memory-test.cc
+++ b/cpp/src/arrow/io/io-memory-test.cc
@@ -131,9 +131,16 @@ TEST(TestBufferReader, RetainParentReference) {
 }
 
 TEST(TestMemcopy, ParallelMemcopy) {
+#if defined(ARROW_VALGRIND)
+  // Compensate for Valgrind's slowness
+  constexpr int64_t THRESHOLD = 32 * 1024;
+#else
+  constexpr int64_t THRESHOLD = 1024 * 1024;
+#endif
+
   for (int i = 0; i < 5; ++i) {
     // randomize size so the memcopy alignment is tested
-    int64_t total_size = 3 * 1024 * 1024 + std::rand() % 100;
+    int64_t total_size = 3 * THRESHOLD + std::rand() % 100;
 
     std::shared_ptr<Buffer> buffer1, buffer2;
 
@@ -144,7 +151,7 @@ TEST(TestMemcopy, ParallelMemcopy) {
 
     io::FixedSizeBufferWriter writer(buffer1);
     writer.set_memcopy_threads(4);
-    writer.set_memcopy_threshold(1024 * 1024);
+    writer.set_memcopy_threshold(THRESHOLD);
     ASSERT_OK(writer.Write(buffer2->data(), buffer2->size()));
 
     ASSERT_EQ(0, memcmp(buffer1->data(), buffer2->data(), buffer1->size()));
diff --git a/cpp/src/arrow/ipc/ipc-read-write-test.cc b/cpp/src/arrow/ipc/ipc-read-write-test.cc
index baf067e..f6e49ea 100644
--- a/cpp/src/arrow/ipc/ipc-read-write-test.cc
+++ b/cpp/src/arrow/ipc/ipc-read-write-test.cc
@@ -498,8 +498,11 @@ TEST_F(RecursionLimits, StressLimit) {
   CheckDepth(100, &it_works);
   ASSERT_TRUE(it_works);
 
+// Mitigate Valgrind's slowness
+#if !defined(ARROW_VALGRIND)
   CheckDepth(500, &it_works);
   ASSERT_TRUE(it_works);
+#endif
 }
 #endif  // !defined(_WIN32) || defined(NDEBUG)
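
Besides shrinking sizes, the compute-test.cc loops above also swap a per-iteration std::stringstream for snprintf into a reused stack buffer, which is noticeably cheaper under Valgrind. A standalone sketch of that idea (MakeValues is a hypothetical helper, not code from the test):

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Format "test<N>" values into a reused stack buffer instead of
    // constructing a std::stringstream on every iteration.
    std::vector<std::string> MakeValues(int32_t total, int32_t repeats) {
      std::vector<std::string> values;
      values.reserve(static_cast<size_t>(total) * repeats);
      char buf[20] = "test";
      for (int32_t i = 0; i < total * repeats; ++i) {
        const int32_t index = i % total;
        // Overwrite only the numeric suffix; the "test" prefix stays in place.
        snprintf(buf + 4, sizeof(buf) - 4, "%d", index);
        values.emplace_back(buf);
      }
      return values;
    }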