Posted to commits@arrow.apache.org by ap...@apache.org on 2020/03/26 10:19:19 UTC

[arrow] branch master updated: ARROW-8151: [Dataset][Benchmarking] benchmark S3File performance

This is an automated email from the ASF dual-hosted git repository.

apitrou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow.git


The following commit(s) were added to refs/heads/master by this push:
     new 59903d0  ARROW-8151: [Dataset][Benchmarking] benchmark S3File performance
59903d0 is described below

commit 59903d089ec5b1766e1622d94c1f618b539b205d
Author: David Li <li...@gmail.com>
AuthorDate: Thu Mar 26 11:18:55 2020 +0100

    ARROW-8151: [Dataset][Benchmarking] benchmark S3File performance
    
    This does some basic refactoring of the S3 filesystem test to share some code, but I'd appreciate any feedback on how to better organize things.
    
    This currently has 4 sets of benchmarks:
    - A test of reading an entire file of various sizes from S3
    - A test of reading sequential small chunks of those files
    - A test of using coalescing to read those sequential small chunks
    - A test of reading a Parquet file
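
    The coalescing case is the least obvious of the four. A rough sketch of
    the pattern it measures, using the ReadRangeCache arguments from the
    benchmark code below (the surrounding scaffolding is illustrative only):

        // Assumes `file` is an opened io::RandomAccessFile of length `size`.
        constexpr int64_t kChunkSize = 5 * 1024 * 1024;  // as in the benchmark
        io::internal::ReadRangeCache cache(file, /*hole_size_limit=*/8192,
                                           /*range_size_limit=*/64 * 1024 * 1024);
        std::vector<io::ReadRange> ranges;
        for (int64_t offset = 0; offset < size; offset += kChunkSize) {
          ranges.push_back({offset, std::min(size - offset, kChunkSize)});
        }
        ASSERT_OK(cache.Cache(ranges));  // issues the coalesced reads up front
        // Later cache.Read({offset, length}) calls are served from memory.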
    
    Closes #6675 from lidavidm/arrow-8151-s3-bench
    
    Authored-by: David Li <li...@gmail.com>
    Signed-off-by: Antoine Pitrou <an...@python.org>
---
 cpp/src/arrow/filesystem/CMakeLists.txt    |   9 +
 cpp/src/arrow/filesystem/s3_test_util.h    | 157 +++++++++++++
 cpp/src/arrow/filesystem/s3fs_benchmark.cc | 340 +++++++++++++++++++++++++++++
 cpp/src/arrow/filesystem/s3fs_test.cc      |  92 +-------
 4 files changed, 507 insertions(+), 91 deletions(-)

diff --git a/cpp/src/arrow/filesystem/CMakeLists.txt b/cpp/src/arrow/filesystem/CMakeLists.txt
index bc1a1e7..d8a03c2 100644
--- a/cpp/src/arrow/filesystem/CMakeLists.txt
+++ b/cpp/src/arrow/filesystem/CMakeLists.txt
@@ -36,6 +36,15 @@ if(ARROW_S3)
                           ${GFLAGS_LIBRARIES} GTest::gtest)
     add_dependencies(arrow-tests arrow-s3fs-narrative-test)
   endif()
+
+  if(ARROW_BUILD_BENCHMARKS AND ARROW_PARQUET)
+    add_arrow_benchmark(s3fs_benchmark PREFIX "arrow-filesystem")
+    if(ARROW_TEST_LINKAGE STREQUAL "static")
+      target_link_libraries(arrow-filesystem-s3fs-benchmark PRIVATE parquet_static)
+    else()
+      target_link_libraries(arrow-filesystem-s3fs-benchmark PRIVATE parquet_shared)
+    endif()
+  endif()
 endif()
 
 if(ARROW_HDFS)
diff --git a/cpp/src/arrow/filesystem/s3_test_util.h b/cpp/src/arrow/filesystem/s3_test_util.h
new file mode 100644
index 0000000..c6376ca
--- /dev/null
+++ b/cpp/src/arrow/filesystem/s3_test_util.h
@@ -0,0 +1,157 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <memory>
+#include <sstream>
+#include <string>
+#include <utility>
+
+// boost/process/detail/windows/handle_workaround.hpp doesn't work
+// without BOOST_USE_WINDOWS_H with MinGW because MinGW doesn't
+// provide __kernel_entry without winternl.h.
+//
+// See also:
+// https://github.com/boostorg/process/blob/develop/include/boost/process/detail/windows/handle_workaround.hpp
+#include <boost/process.hpp>
+
+#include <gtest/gtest.h>
+
+#include <aws/core/Aws.h>
+
+#include "arrow/filesystem/s3fs.h"
+#include "arrow/status.h"
+#include "arrow/testing/gtest_util.h"
+#include "arrow/util/io_util.h"
+
+namespace arrow {
+namespace fs {
+
+using ::arrow::internal::TemporaryDir;
+
+namespace bp = boost::process;
+
+// TODO: allocate an ephemeral port
+static const char* kMinioExecutableName = "minio";
+static const char* kMinioAccessKey = "minio";
+static const char* kMinioSecretKey = "miniopass";
+
+// Environment variables to configure another S3-compatible service
+static const char* kEnvConnectString = "ARROW_TEST_S3_CONNECT_STRING";
+static const char* kEnvAccessKey = "ARROW_TEST_S3_ACCESS_KEY";
+static const char* kEnvSecretKey = "ARROW_TEST_S3_SECRET_KEY";
+
+static std::string GenerateConnectString() {
+  std::stringstream ss;
+  ss << "127.0.0.1:" << GetListenPort();
+  return ss.str();
+}
+
+// A minio test server, managed as a child process
+
+class MinioTestServer {
+ public:
+  Status Start();
+
+  Status Stop();
+
+  std::string connect_string() const { return connect_string_; }
+
+  std::string access_key() const { return access_key_; }
+
+  std::string secret_key() const { return secret_key_; }
+
+ private:
+  std::unique_ptr<TemporaryDir> temp_dir_;
+  std::string connect_string_;
+  std::string access_key_ = kMinioAccessKey;
+  std::string secret_key_ = kMinioSecretKey;
+  std::shared_ptr<::boost::process::child> server_process_;
+};
+
+Status MinioTestServer::Start() {
+  const char* connect_str = std::getenv(kEnvConnectString);
+  const char* access_key = std::getenv(kEnvAccessKey);
+  const char* secret_key = std::getenv(kEnvSecretKey);
+  if (connect_str && access_key && secret_key) {
+    // Use external instance
+    connect_string_ = connect_str;
+    access_key_ = access_key;
+    secret_key_ = secret_key;
+    return Status::OK();
+  }
+
+  ARROW_ASSIGN_OR_RAISE(temp_dir_, TemporaryDir::Make("s3fs-test-"));
+
+  // Get a copy of the current environment.
+  // (NOTE: using "auto" would return a native_environment that mutates
+  //  the current environment)
+  bp::environment env = boost::this_process::environment();
+  env["MINIO_ACCESS_KEY"] = kMinioAccessKey;
+  env["MINIO_SECRET_KEY"] = kMinioSecretKey;
+
+  connect_string_ = GenerateConnectString();
+
+  auto exe_path = bp::search_path(kMinioExecutableName);
+  if (exe_path.empty()) {
+    return Status::IOError("Failed to find minio executable ('", kMinioExecutableName,
+                           "') in PATH");
+  }
+
+  try {
+    // NOTE: --quiet makes startup faster by suppressing remote version check
+    server_process_ = std::make_shared<bp::child>(
+        env, exe_path, "server", "--quiet", "--compat", "--address", connect_string_,
+        temp_dir_->path().ToString());
+  } catch (const std::exception& e) {
+    return Status::IOError("Failed to launch Minio server: ", e.what());
+  }
+  return Status::OK();
+}
+
+Status MinioTestServer::Stop() {
+  if (server_process_ && server_process_->valid()) {
+    // Brutal shutdown
+    server_process_->terminate();
+    server_process_->wait();
+  }
+  return Status::OK();
+}
+
+// A global test "environment", to ensure that the S3 API is initialized before
+// running unit tests.
+
+class S3Environment : public ::testing::Environment {
+ public:
+  void SetUp() override {
+    // Change this to increase logging during tests
+    S3GlobalOptions options;
+    options.log_level = S3LogLevel::Fatal;
+    ASSERT_OK(InitializeS3(options));
+  }
+
+  void TearDown() override { ASSERT_OK(FinalizeS3()); }
+
+ protected:
+  Aws::SDKOptions options_;
+};
+
+::testing::Environment* s3_env = ::testing::AddGlobalTestEnvironment(new S3Environment);
+
+}  // namespace fs
+}  // namespace arrow
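
A minimal sketch of driving the MinioTestServer declared above from a test
(this test is not part of the patch; the test name and body are illustrative
only, though the S3Options calls mirror the benchmark code below):

    #include <gtest/gtest.h>

    #include "arrow/filesystem/s3_test_util.h"
    #include "arrow/filesystem/s3fs.h"
    #include "arrow/testing/gtest_util.h"

    namespace arrow {
    namespace fs {

    // Hypothetical smoke test: start Minio, connect an S3FileSystem, stop it.
    TEST(MinioTestServerSmoke, StartStop) {
      MinioTestServer minio;
      ASSERT_OK(minio.Start());
      S3Options options;
      options.ConfigureAccessKey(minio.access_key(), minio.secret_key());
      options.scheme = "http";
      options.endpoint_override = minio.connect_string();
      ASSERT_OK_AND_ASSIGN(auto fs, S3FileSystem::Make(options));
      ASSERT_OK(minio.Stop());
    }

    }  // namespace fs
    }  // namespace arrow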
diff --git a/cpp/src/arrow/filesystem/s3fs_benchmark.cc b/cpp/src/arrow/filesystem/s3fs_benchmark.cc
new file mode 100644
index 0000000..755be3d
--- /dev/null
+++ b/cpp/src/arrow/filesystem/s3fs_benchmark.cc
@@ -0,0 +1,340 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <memory>
+#include <sstream>
+#include <utility>
+
+#include "benchmark/benchmark.h"
+
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/s3/S3Client.h>
+#include <aws/s3/model/CreateBucketRequest.h>
+#include <aws/s3/model/HeadBucketRequest.h>
+#include <aws/s3/model/PutObjectRequest.h>
+
+#include "arrow/filesystem/s3_internal.h"
+#include "arrow/filesystem/s3_test_util.h"
+#include "arrow/filesystem/s3fs.h"
+#include "arrow/io/caching.h"
+#include "arrow/io/interfaces.h"
+#include "arrow/status.h"
+#include "arrow/table.h"
+#include "arrow/testing/gtest_util.h"
+#include "arrow/testing/random.h"
+
+#include "parquet/arrow/reader.h"
+#include "parquet/arrow/writer.h"
+#include "parquet/properties.h"
+
+namespace arrow {
+namespace fs {
+
+using ::arrow::fs::internal::ConnectRetryStrategy;
+using ::arrow::fs::internal::OutcomeToStatus;
+using ::arrow::fs::internal::ToAwsString;
+
+// Environment variables to configure the S3 test environment
+static const char* kEnvBucketName = "ARROW_TEST_S3_BUCKET";
+static const char* kEnvSkipSetup = "ARROW_TEST_S3_SKIP_SETUP";
+static const char* kEnvAwsRegion = "ARROW_TEST_S3_REGION";
+
+// Set up Minio and create the test bucket and files.
+class MinioFixture : public benchmark::Fixture {
+ public:
+  void SetUp(const ::benchmark::State& state) override {
+    ASSERT_OK(minio_.Start());
+
+    const char* region_str = std::getenv(kEnvAwsRegion);
+    if (region_str) {
+      region_ = region_str;
+      std::cerr << "Using region from environment: " << region_ << std::endl;
+    } else {
+      std::cerr << "Using default region" << std::endl;
+    }
+
+    const char* bucket_str = std::getenv(kEnvBucketName);
+    if (bucket_str) {
+      bucket_ = bucket_str;
+      std::cerr << "Using bucket from environment: " << bucket_ << std::endl;
+    } else {
+      bucket_ = "bucket";
+      std::cerr << "Using default bucket: " << bucket_ << std::endl;
+    }
+
+    client_config_.endpointOverride = ToAwsString(minio_.connect_string());
+    client_config_.scheme = Aws::Http::Scheme::HTTP;
+    if (!region_.empty()) {
+      client_config_.region = ToAwsString(region_);
+    }
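+    // Retry on connection errors, since the Minio server may still be starting up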
+    client_config_.retryStrategy = std::make_shared<ConnectRetryStrategy>();
+    credentials_ = {ToAwsString(minio_.access_key()), ToAwsString(minio_.secret_key())};
+    bool use_virtual_addressing = false;
+    client_.reset(
+        new Aws::S3::S3Client(credentials_, client_config_,
+                              Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
+                              use_virtual_addressing));
+
+    MakeFileSystem();
+
+    const char* skip_str = std::getenv(kEnvSkipSetup);
+    const std::string skip = skip_str ? std::string(skip_str) : "";
+    if (!skip.empty()) {
+      std::cerr << "Skipping creation of bucket/objects as requested" << std::endl;
+    } else {
+      ASSERT_OK(MakeBucket());
+      ASSERT_OK(MakeObject("bytes_1mib", 1024 * 1024));
+      ASSERT_OK(MakeObject("bytes_100mib", 100 * 1024 * 1024));
+      ASSERT_OK(MakeObject("bytes_500mib", 500 * 1024 * 1024));
+      ASSERT_OK(MakeParquetObject(bucket_ + "/pq_c100_r250k", 100, 250000));
+    }
+  }
+
+  void MakeFileSystem() {
+    options_.ConfigureAccessKey(minio_.access_key(), minio_.secret_key());
+    options_.scheme = "http";
+    if (!region_.empty()) {
+      options_.region = region_;
+    }
+    options_.endpoint_override = minio_.connect_string();
+    ASSERT_OK_AND_ASSIGN(fs_, S3FileSystem::Make(options_));
+  }
+
+  /// Set up bucket if it doesn't exist.
+  ///
+  /// When using Minio we'll have a fresh setup each time, but
+  /// otherwise we may have a leftover bucket.
+  Status MakeBucket() {
+    Aws::S3::Model::HeadBucketRequest head;
+    head.SetBucket(ToAwsString(bucket_));
+    const Status st = OutcomeToStatus(client_->HeadBucket(head));
+    if (st.ok()) {
+      // Bucket exists already
+      return st;
+    }
+    Aws::S3::Model::CreateBucketRequest req;
+    req.SetBucket(ToAwsString(bucket_));
+    return OutcomeToStatus(client_->CreateBucket(req));
+  }
+
+  /// Make an object with dummy data.
+  Status MakeObject(const std::string& name, int size) {
+    Aws::S3::Model::PutObjectRequest req;
+    req.SetBucket(ToAwsString(bucket_));
+    req.SetKey(ToAwsString(name));
+    req.SetBody(std::make_shared<std::stringstream>(std::string(size, 'a')));
+    return OutcomeToStatus(client_->PutObject(req));
+  }
+
+  /// Make an object with Parquet data.
+  Status MakeParquetObject(const std::string& path, int num_columns, int num_rows) {
+    std::vector<std::shared_ptr<ChunkedArray>> columns(num_columns);
+    std::vector<std::shared_ptr<Field>> fields(num_columns);
+    for (int i = 0; i < num_columns; ++i) {
+      arrow::random::RandomArrayGenerator generator(i);
+      std::shared_ptr<Array> values = generator.Float64(num_rows, -1.e10, 1e10, 0);
+      std::stringstream ss;
+      ss << "col" << i;
+      columns[i] = std::make_shared<ChunkedArray>(values);
+      fields[i] = ::arrow::field(ss.str(), values->type());
+    }
+    auto schema = std::make_shared<::arrow::Schema>(fields);
+
+    std::shared_ptr<Table> table = Table::Make(schema, columns);
+
+    std::shared_ptr<io::OutputStream> sink;
+    ARROW_ASSIGN_OR_RAISE(sink, fs_->OpenOutputStream(path));
+    RETURN_NOT_OK(
+        parquet::arrow::WriteTable(*table, arrow::default_memory_pool(), sink, num_rows));
+
+    return Status::OK();
+  }
+
+  void TearDown(const ::benchmark::State& state) override { ASSERT_OK(minio_.Stop()); }
+
+ protected:
+  MinioTestServer minio_;
+  std::string region_;
+  std::string bucket_;
+  Aws::Client::ClientConfiguration client_config_;
+  Aws::Auth::AWSCredentials credentials_;
+  std::unique_ptr<Aws::S3::S3Client> client_;
+  S3Options options_;
+  std::shared_ptr<S3FileSystem> fs_;
+};
+
+/// Set up/tear down the AWS SDK globally.
+/// (GBenchmark doesn't run GTest environments.)
+class S3BenchmarkEnvironment {
+ public:
+  S3BenchmarkEnvironment() { s3_env->SetUp(); }
+  ~S3BenchmarkEnvironment() { s3_env->TearDown(); }
+};
+
+S3BenchmarkEnvironment env{};
+
+/// Read the entire file into memory in one go to measure bandwidth.
+static void NaiveRead(benchmark::State& st, S3FileSystem* fs, const std::string& path) {
+  int64_t total_bytes = 0;
+  int total_items = 0;
+  for (auto _ : st) {
+    std::shared_ptr<io::RandomAccessFile> file;
+    std::shared_ptr<Buffer> buf;
+    int64_t size;
+    ASSERT_OK_AND_ASSIGN(file, fs->OpenInputFile(path));
+    ASSERT_OK_AND_ASSIGN(size, file->GetSize());
+    ASSERT_OK_AND_ASSIGN(buf, file->ReadAt(0, size));
+    total_bytes += buf->size();
+    total_items += 1;
+  }
+  st.SetBytesProcessed(total_bytes);
+  st.SetItemsProcessed(total_items);
+  std::cerr << "Read the file " << total_items << " times" << std::endl;
+}
+
+constexpr int64_t kChunkSize = 5 * 1024 * 1024;
+
+/// Mimic the Parquet reader, reading the file in small chunks.
+static void ChunkedRead(benchmark::State& st, S3FileSystem* fs, const std::string& path) {
+  int64_t total_bytes = 0;
+  int total_items = 0;
+  for (auto _ : st) {
+    std::shared_ptr<io::RandomAccessFile> file;
+    std::shared_ptr<Buffer> buf;
+    int64_t size = 0;
+    ASSERT_OK_AND_ASSIGN(file, fs->OpenInputFile(path));
+    ASSERT_OK_AND_ASSIGN(size, file->GetSize());
+    total_items += 1;
+
+    int64_t offset = 0;
+    while (offset < size) {
+      const int64_t read = std::min(size - offset, kChunkSize);
+      ASSERT_OK_AND_ASSIGN(buf, file->ReadAt(offset, read));
+      total_bytes += buf->size();
+      offset += buf->size();
+    }
+  }
+  st.SetBytesProcessed(total_bytes);
+  st.SetItemsProcessed(total_items);
+  std::cerr << "Read the file " << total_items << " times" << std::endl;
+}
+
+/// Read the file in small chunks, but using read coalescing.
+static void CoalescedRead(benchmark::State& st, S3FileSystem* fs,
+                          const std::string& path) {
+  int64_t total_bytes = 0;
+  int total_items = 0;
+  for (auto _ : st) {
+    std::shared_ptr<io::RandomAccessFile> file;
+    std::shared_ptr<Buffer> buf;
+    int64_t size = 0;
+    ASSERT_OK_AND_ASSIGN(file, fs->OpenInputFile(path));
+    ASSERT_OK_AND_ASSIGN(size, file->GetSize());
+    total_items += 1;
+
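+    // Coalesce ranges separated by less than 8192 bytes, up to 64 MiB per request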
+    io::internal::ReadRangeCache cache(file, 8192, 64 * 1024 * 1024);
+    std::vector<io::ReadRange> ranges;
+
+    int64_t offset = 0;
+    while (offset < size) {
+      const int64_t read = std::min(size - offset, kChunkSize);
+      ranges.push_back(io::ReadRange{offset, read});
+      offset += read;
+    }
+    ASSERT_OK(cache.Cache(ranges));
+
+    offset = 0;
+    while (offset < size) {
+      const int64_t read = std::min(size - offset, kChunkSize);
+      ASSERT_OK_AND_ASSIGN(buf, cache.Read({offset, read}));
+      total_bytes += buf->size();
+      offset += read;
+    }
+  }
+  st.SetBytesProcessed(total_bytes);
+  st.SetItemsProcessed(total_items);
+  std::cerr << "Read the file " << total_items << " times" << std::endl;
+}
+
+/// Read a Parquet file from S3.
+static void ParquetRead(benchmark::State& st, S3FileSystem* fs, const std::string& path) {
+  int64_t total_bytes = 0;
+  int total_items = 0;
+  for (auto _ : st) {
+    std::shared_ptr<io::RandomAccessFile> file;
+    int64_t size = 0;
+    ASSERT_OK_AND_ASSIGN(file, fs->OpenInputFile(path));
+    ASSERT_OK_AND_ASSIGN(size, file->GetSize());
+
+    parquet::ArrowReaderProperties properties;
+    properties.set_use_threads(true);
+    std::unique_ptr<parquet::arrow::FileReader> reader;
+    parquet::arrow::FileReaderBuilder builder;
+    ASSERT_OK(builder.Open(file));
+    ASSERT_OK(builder.properties(properties)->Build(&reader));
+    std::shared_ptr<RecordBatchReader> rb_reader;
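+    // The file was written as a single row group, so row group {0} covers it all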
+    ASSERT_OK(reader->GetRecordBatchReader({0}, &rb_reader));
+    std::shared_ptr<Table> table;
+    ASSERT_OK(rb_reader->ReadAll(&table));
+
+    // TODO: actually measure table memory usage
+    total_bytes += size;
+    total_items += 1;
+  }
+  st.SetBytesProcessed(total_bytes);
+  st.SetItemsProcessed(total_items);
+  std::cerr << "Read the file " << total_items << " times" << std::endl;
+}
+
+BENCHMARK_DEFINE_F(MinioFixture, ReadAll1Mib)(benchmark::State& st) {
+  NaiveRead(st, fs_.get(), bucket_ + "/bytes_1mib");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadAll1Mib)->UseRealTime();
+BENCHMARK_DEFINE_F(MinioFixture, ReadAll100Mib)(benchmark::State& st) {
+  NaiveRead(st, fs_.get(), bucket_ + "/bytes_100mib");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadAll100Mib)->UseRealTime();
+BENCHMARK_DEFINE_F(MinioFixture, ReadAll500Mib)(benchmark::State& st) {
+  NaiveRead(st, fs_.get(), bucket_ + "/bytes_500mib");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadAll500Mib)->UseRealTime();
+
+BENCHMARK_DEFINE_F(MinioFixture, ReadChunked100Mib)(benchmark::State& st) {
+  ChunkedRead(st, fs_.get(), bucket_ + "/bytes_100mib");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadChunked100Mib)->UseRealTime();
+BENCHMARK_DEFINE_F(MinioFixture, ReadChunked500Mib)(benchmark::State& st) {
+  ChunkedRead(st, fs_.get(), bucket_ + "/bytes_500mib");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadChunked500Mib)->UseRealTime();
+
+BENCHMARK_DEFINE_F(MinioFixture, ReadCoalesced100Mib)(benchmark::State& st) {
+  CoalescedRead(st, fs_.get(), bucket_ + "/bytes_100mib");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadCoalesced100Mib)->UseRealTime();
+BENCHMARK_DEFINE_F(MinioFixture, ReadCoalesced500Mib)(benchmark::State& st) {
+  CoalescedRead(st, fs_.get(), bucket_ + "/bytes_500mib");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadCoalesced500Mib)->UseRealTime();
+
+BENCHMARK_DEFINE_F(MinioFixture, ReadParquet250K)(benchmark::State& st) {
+  ParquetRead(st, fs_.get(), bucket_ + "/pq_c100_r250k");
+}
+BENCHMARK_REGISTER_F(MinioFixture, ReadParquet250K)->UseRealTime();
+
+}  // namespace fs
+}  // namespace arrow
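
Adding another size variant follows the same define/register pattern. A
hypothetical example (no "bytes_10mib" object is created by this patch, so
SetUp would need a matching MakeObject("bytes_10mib", 10 * 1024 * 1024)):

    BENCHMARK_DEFINE_F(MinioFixture, ReadAll10Mib)(benchmark::State& st) {
      NaiveRead(st, fs_.get(), bucket_ + "/bytes_10mib");
    }
    BENCHMARK_REGISTER_F(MinioFixture, ReadAll10Mib)->UseRealTime();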
diff --git a/cpp/src/arrow/filesystem/s3fs_test.cc b/cpp/src/arrow/filesystem/s3fs_test.cc
index f69100a..d71e1bb 100644
--- a/cpp/src/arrow/filesystem/s3fs_test.cc
+++ b/cpp/src/arrow/filesystem/s3fs_test.cc
@@ -54,13 +54,13 @@
 
 #include "arrow/filesystem/filesystem.h"
 #include "arrow/filesystem/s3_internal.h"
+#include "arrow/filesystem/s3_test_util.h"
 #include "arrow/filesystem/s3fs.h"
 #include "arrow/filesystem/test_util.h"
 #include "arrow/result.h"
 #include "arrow/status.h"
 #include "arrow/testing/gtest_util.h"
 #include "arrow/testing/util.h"
-#include "arrow/util/io_util.h"
 #include "arrow/util/logging.h"
 #include "arrow/util/macros.h"
 
@@ -68,7 +68,6 @@ namespace arrow {
 namespace fs {
 
 using ::arrow::internal::PlatformFilename;
-using ::arrow::internal::TemporaryDir;
 using ::arrow::internal::UriEscape;
 
 using ::arrow::fs::internal::ConnectRetryStrategy;
@@ -102,95 +101,6 @@ namespace bp = boost::process;
   ARROW_AWS_ASSIGN_OR_FAIL_IMPL(             \
       ARROW_AWS_ASSIGN_OR_FAIL_NAME(_aws_error_or_value, __COUNTER__), lhs, rexpr);
 
-// TODO: allocate an ephemeral port
-static const char* kMinioExecutableName = "minio";
-static const char* kMinioAccessKey = "minio";
-static const char* kMinioSecretKey = "miniopass";
-
-static std::string GenerateConnectString() {
-  std::stringstream ss;
-  ss << "127.0.0.1:" << GetListenPort();
-  return ss.str();
-}
-
-// A minio test server, managed as a child process
-
-class MinioTestServer {
- public:
-  Status Start();
-
-  Status Stop();
-
-  std::string connect_string() const { return connect_string_; }
-
-  std::string access_key() const { return kMinioAccessKey; }
-
-  std::string secret_key() const { return kMinioSecretKey; }
-
- private:
-  std::unique_ptr<TemporaryDir> temp_dir_;
-  std::string connect_string_;
-  std::shared_ptr<::boost::process::child> server_process_;
-};
-
-Status MinioTestServer::Start() {
-  ARROW_ASSIGN_OR_RAISE(temp_dir_, TemporaryDir::Make("s3fs-test-"));
-
-  // Get a copy of the current environment.
-  // (NOTE: using "auto" would return a native_environment that mutates
-  //  the current environment)
-  bp::environment env = boost::this_process::environment();
-  env["MINIO_ACCESS_KEY"] = kMinioAccessKey;
-  env["MINIO_SECRET_KEY"] = kMinioSecretKey;
-
-  connect_string_ = GenerateConnectString();
-
-  auto exe_path = bp::search_path(kMinioExecutableName);
-  if (exe_path.empty()) {
-    return Status::IOError("Failed to find minio executable ('", kMinioExecutableName,
-                           "') in PATH");
-  }
-
-  try {
-    // NOTE: --quiet makes startup faster by suppressing remote version check
-    server_process_ = std::make_shared<bp::child>(
-        env, exe_path, "server", "--quiet", "--compat", "--address", connect_string_,
-        temp_dir_->path().ToString());
-  } catch (const std::exception& e) {
-    return Status::IOError("Failed to launch Minio server: ", e.what());
-  }
-  return Status::OK();
-}
-
-Status MinioTestServer::Stop() {
-  if (server_process_ && server_process_->valid()) {
-    // Brutal shutdown
-    server_process_->terminate();
-    server_process_->wait();
-  }
-  return Status::OK();
-}
-
-// A global test "environment", to ensure that the S3 API is initialized before
-// running unit tests.
-
-class S3Environment : public ::testing::Environment {
- public:
-  void SetUp() override {
-    // Change this to increase logging during tests
-    S3GlobalOptions options;
-    options.log_level = S3LogLevel::Fatal;
-    ASSERT_OK(InitializeS3(options));
-  }
-
-  void TearDown() override { ASSERT_OK(FinalizeS3()); }
-
- protected:
-  Aws::SDKOptions options_;
-};
-
-::testing::Environment* s3_env = ::testing::AddGlobalTestEnvironment(new S3Environment);
-
 class S3TestMixin : public ::testing::Test {
  public:
   void SetUp() override {