You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@iceberg.apache.org by GitBox <gi...@apache.org> on 2021/06/04 16:35:28 UTC

[GitHub] [iceberg] aokolnychyi commented on a change in pull request #2415: Spark: Add an action to remove all referenced files

aokolnychyi commented on a change in pull request #2415:
URL: https://github.com/apache/iceberg/pull/2415#discussion_r645684299



##########
File path: spark/src/main/java/org/apache/iceberg/spark/actions/BaseRemoveFilesSparkAction.java
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.spark.actions;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+import org.apache.iceberg.ReachableFileUtil;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableMetadata;
+import org.apache.iceberg.TableMetadataParser;
+import org.apache.iceberg.actions.BaseRemoveFilesActionResult;
+import org.apache.iceberg.actions.RemoveFiles;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.hadoop.HadoopFileIO;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.spark.JobGroupInfo;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.Tasks;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.functions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An implementation of {@link RemoveFiles} that uses metadata tables in Spark
+ * to determine which files should be deleted.
+ */
+@SuppressWarnings("UnnecessaryAnonymousClass")
+public class BaseRemoveFilesSparkAction
+    extends BaseSparkAction<RemoveFiles, RemoveFiles.Result> implements RemoveFiles {
+  private static final Logger LOG = LoggerFactory.getLogger(BaseRemoveFilesSparkAction.class);
+
+  private static final String DATA_FILE = "Data File";
+  private static final String MANIFEST = "Manifest";
+  private static final String MANIFEST_LIST = "Manifest List";
+  private static final String OTHERS = "Others";
+
+  private static final String STREAM_RESULTS = "stream-results";
+
+  // Creates an executor service that runs each task in the thread that invokes execute/submit.
+  private static final ExecutorService DEFAULT_DELETE_EXECUTOR_SERVICE = null;
+
+  private final String metadataLocation;
+  private final Consumer<String> defaultDelete = new Consumer<String>() {
+    @Override
+    public void accept(String file) {
+      io.deleteFile(file);
+    }
+  };
+
+  private Consumer<String> removeFunc = defaultDelete;
+  private ExecutorService removeExecutorService = DEFAULT_DELETE_EXECUTOR_SERVICE;
+  private FileIO io = new HadoopFileIO();
+
+  public BaseRemoveFilesSparkAction(SparkSession spark, String metadataLocation) {
+    super(spark);
+    this.metadataLocation = metadataLocation;
+  }
+
+  @Override
+  protected RemoveFiles self() {
+    return this;
+  }
+
+  @Override
+  public Result execute() {
+    if (io == null) {

Review comment:
       I think we should use `Preconditions.checkArgument` here.

##########
File path: spark/src/main/java/org/apache/iceberg/spark/actions/BaseRemoveFilesSparkAction.java
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.spark.actions;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+import org.apache.iceberg.ReachableFileUtil;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableMetadata;
+import org.apache.iceberg.TableMetadataParser;
+import org.apache.iceberg.actions.BaseRemoveFilesActionResult;
+import org.apache.iceberg.actions.RemoveFiles;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.hadoop.HadoopFileIO;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.spark.JobGroupInfo;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.Tasks;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.functions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An implementation of {@link RemoveFiles} that uses metadata tables in Spark
+ * to determine which files should be deleted.
+ */
+@SuppressWarnings("UnnecessaryAnonymousClass")
+public class BaseRemoveFilesSparkAction
+    extends BaseSparkAction<RemoveFiles, RemoveFiles.Result> implements RemoveFiles {
+  private static final Logger LOG = LoggerFactory.getLogger(BaseRemoveFilesSparkAction.class);
+
+  private static final String DATA_FILE = "Data File";
+  private static final String MANIFEST = "Manifest";
+  private static final String MANIFEST_LIST = "Manifest List";
+  private static final String OTHERS = "Others";
+
+  private static final String STREAM_RESULTS = "stream-results";
+
+  // Creates an executor service that runs each task in the thread that invokes execute/submit.
+  private static final ExecutorService DEFAULT_DELETE_EXECUTOR_SERVICE = null;
+
+  private final String metadataLocation;
+  private final Consumer<String> defaultDelete = new Consumer<String>() {
+    @Override
+    public void accept(String file) {
+      io.deleteFile(file);
+    }
+  };
+
+  private Consumer<String> removeFunc = defaultDelete;
+  private ExecutorService removeExecutorService = DEFAULT_DELETE_EXECUTOR_SERVICE;
+  private FileIO io = new HadoopFileIO();
+
+  public BaseRemoveFilesSparkAction(SparkSession spark, String metadataLocation) {
+    super(spark);
+    this.metadataLocation = metadataLocation;
+  }
+
+  @Override
+  protected RemoveFiles self() {
+    return this;
+  }
+
+  @Override
+  public Result execute() {
+    if (io == null) {
+      throw new RuntimeException("IO needs to be set for removing the files");
+    }
+    String msg = String.format("Removing files reachable from %s", metadataLocation);
+    JobGroupInfo info = newJobGroupInfo("REMOVE-FILES", msg);
+    return withJobGroupInfo(info, this::doExecute);
+  }
+
+  private Result doExecute() {
+    boolean streamResults = PropertyUtil.propertyAsBoolean(options(), STREAM_RESULTS, false);
+    TableMetadata metadata = TableMetadataParser.read(io, metadataLocation);
+    Dataset<Row> validFileDF = buildValidFileDF(metadata).distinct();
+    if (streamResults) {
+      return deleteFiles(validFileDF.toLocalIterator());
+    } else {
+      return deleteFiles(validFileDF.collectAsList().iterator());
+    }
+  }
+
+  private Dataset<Row> projectFilePathWithType(Dataset<Row> ds, String type) {
+    return ds.select(functions.col("file_path"), functions.lit(type).as("file_type"));
+  }
+
+  private Dataset<Row> buildValidFileDF(TableMetadata metadata) {
+    Table staticTable = newStaticTable(metadata, io);
+    return projectFilePathWithType(buildValidDataFileDF(staticTable), DATA_FILE)
+        .union(projectFilePathWithType(buildManifestFileDF(staticTable), MANIFEST))
+        .union(projectFilePathWithType(buildManifestListDF(staticTable), MANIFEST_LIST))
+        .union(projectFilePathWithType(buildOtherMetadataFileDF(staticTable), OTHERS));
+  }
+
+  @Override
+  protected Dataset<Row> buildOtherMetadataFileDF(Table table) {
+    List<String> otherMetadataFiles = Lists.newArrayList();
+    otherMetadataFiles.addAll(ReachableFileUtil.metadataFileLocations(table, true));
+    // otherMetadataFiles.add(ReachableFileUtil.versionHintLocation(table));
+    return spark().createDataset(otherMetadataFiles, Encoders.STRING()).toDF("file_path");
+  }
+
+
+  @Override
+  public RemoveFiles io(FileIO fileIO) {

Review comment:
       It will be a bit more consistent with other actions if we define `io`, `deleteWith`, `executeDeleteWith` after `self` at the top of the class. Like we have in `BaseExpireSnapshotsSparkAction`, for example.

##########
File path: spark3/src/test/java/org/apache/iceberg/actions/TestRemoveFilesAction.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.io.File;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.AssertHelpers;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TestHelpers;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.spark.SparkTestBase;
+import org.apache.iceberg.spark.actions.SparkActions;
+import org.apache.iceberg.types.Types;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.apache.iceberg.types.Types.NestedField.optional;
+
+public class TestRemoveFilesAction extends SparkTestBase {
+  private static final HadoopTables TABLES = new HadoopTables(new Configuration());
+  private static final Schema SCHEMA = new Schema(
+      optional(1, "c1", Types.IntegerType.get()),
+      optional(2, "c2", Types.StringType.get()),
+      optional(3, "c3", Types.StringType.get())
+  );
+  private static final int SHUFFLE_PARTITIONS = 2;
+
+  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("c1").build();
+
+  static final DataFile FILE_A = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-a.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(0))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_B = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-b.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(1))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_C = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-c.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(2))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_D = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-d.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(3))
+      .withRecordCount(1)
+      .build();
+
+  @Rule
+  public TemporaryFolder temp = new TemporaryFolder();
+
+  private Table table;
+
+  @Before
+  public void setupTableLocation() throws Exception {
+    File tableDir = temp.newFolder();
+    String tableLocation = tableDir.toURI().toString();
+    this.table = TABLES.create(SCHEMA, SPEC, Maps.newHashMap(), tableLocation);
+    spark.conf().set("spark.sql.shuffle.partitions", SHUFFLE_PARTITIONS);
+  }
+
+  private void checkRemoveFilesResults(long expectedDatafiles, long expectedManifestsDeleted,
+                                       long expectedManifestListsDeleted, long expectedOtherFilesDeleted,
+                                       RemoveFiles.Result results) {
+    Assert.assertEquals("Incorrect number of manifest files deleted",
+        expectedManifestsDeleted,  results.removedManifestsCount());
+    Assert.assertEquals("Incorrect number of datafiles deleted",
+        expectedDatafiles, results.removedDataFilesCount());
+    Assert.assertEquals("Incorrect number of manifest lists deleted",
+        expectedManifestListsDeleted, results.removedManifestListsCount());
+    Assert.assertEquals("Incorrect number of other lists deleted",
+        expectedOtherFilesDeleted - 1, results.otherRemovedFilesCount());
+  }
+
+  @Test
+  public void dataFilesCleanupWithParallelTasks() {
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_B), ImmutableSet.of(FILE_D))
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_A), ImmutableSet.of(FILE_C))
+        .commit();
+
+    Set<String> deletedFiles = Sets.newHashSet();
+    Set<String> deleteThreads = ConcurrentHashMap.newKeySet();
+    AtomicInteger deleteThreadsIndex = new AtomicInteger(0);
+
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .executeRemoveWith(Executors.newFixedThreadPool(4, runnable -> {
+          Thread thread = new Thread(runnable);
+          thread.setName("remove-files-" + deleteThreadsIndex.getAndIncrement());
+          thread.setDaemon(true); // daemon threads will be terminated abruptly when the JVM exits
+          return thread;
+        }))
+        .removeWith(s -> {
+          deleteThreads.add(Thread.currentThread().getName());
+          deletedFiles.add(s);
+        })
+        .execute();
+
+    // Verifies that the delete methods ran in the threads created by the provided ExecutorService ThreadFactory
+    Assert.assertEquals(deleteThreads,

Review comment:
       Is it guaranteed all 4 threads are going to be used?

##########
File path: spark3/src/test/java/org/apache/iceberg/actions/TestRemoveFilesAction.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.io.File;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.AssertHelpers;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TestHelpers;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.spark.SparkTestBase;
+import org.apache.iceberg.spark.actions.SparkActions;
+import org.apache.iceberg.types.Types;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.apache.iceberg.types.Types.NestedField.optional;
+
+public class TestRemoveFilesAction extends SparkTestBase {
+  private static final HadoopTables TABLES = new HadoopTables(new Configuration());
+  private static final Schema SCHEMA = new Schema(
+      optional(1, "c1", Types.IntegerType.get()),
+      optional(2, "c2", Types.StringType.get()),
+      optional(3, "c3", Types.StringType.get())
+  );
+  private static final int SHUFFLE_PARTITIONS = 2;
+
+  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("c1").build();
+
+  static final DataFile FILE_A = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-a.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(0))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_B = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-b.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(1))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_C = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-c.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(2))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_D = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-d.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(3))
+      .withRecordCount(1)
+      .build();
+
+  @Rule
+  public TemporaryFolder temp = new TemporaryFolder();
+
+  private Table table;
+
+  @Before
+  public void setupTableLocation() throws Exception {
+    File tableDir = temp.newFolder();
+    String tableLocation = tableDir.toURI().toString();
+    this.table = TABLES.create(SCHEMA, SPEC, Maps.newHashMap(), tableLocation);
+    spark.conf().set("spark.sql.shuffle.partitions", SHUFFLE_PARTITIONS);
+  }
+
+  private void checkRemoveFilesResults(long expectedDatafiles, long expectedManifestsDeleted,
+                                       long expectedManifestListsDeleted, long expectedOtherFilesDeleted,
+                                       RemoveFiles.Result results) {
+    Assert.assertEquals("Incorrect number of manifest files deleted",
+        expectedManifestsDeleted,  results.removedManifestsCount());
+    Assert.assertEquals("Incorrect number of datafiles deleted",
+        expectedDatafiles, results.removedDataFilesCount());
+    Assert.assertEquals("Incorrect number of manifest lists deleted",
+        expectedManifestListsDeleted, results.removedManifestListsCount());
+    Assert.assertEquals("Incorrect number of other lists deleted",
+        expectedOtherFilesDeleted - 1, results.otherRemovedFilesCount());
+  }
+
+  @Test
+  public void dataFilesCleanupWithParallelTasks() {
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_B), ImmutableSet.of(FILE_D))
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_A), ImmutableSet.of(FILE_C))
+        .commit();
+
+    Set<String> deletedFiles = Sets.newHashSet();
+    Set<String> deleteThreads = ConcurrentHashMap.newKeySet();
+    AtomicInteger deleteThreadsIndex = new AtomicInteger(0);
+
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .executeRemoveWith(Executors.newFixedThreadPool(4, runnable -> {
+          Thread thread = new Thread(runnable);
+          thread.setName("remove-files-" + deleteThreadsIndex.getAndIncrement());
+          thread.setDaemon(true); // daemon threads will be terminated abruptly when the JVM exits
+          return thread;
+        }))
+        .removeWith(s -> {
+          deleteThreads.add(Thread.currentThread().getName());
+          deletedFiles.add(s);
+        })
+        .execute();
+
+    // Verifies that the delete methods ran in the threads created by the provided ExecutorService ThreadFactory
+    Assert.assertEquals(deleteThreads,
+        Sets.newHashSet("remove-files-0", "remove-files-1", "remove-files-2", "remove-files-3"));
+
+    Assert.assertTrue("FILE_A should be deleted", deletedFiles.contains(FILE_A.path().toString()));

Review comment:
       What about C and D?

##########
File path: api/src/main/java/org/apache/iceberg/actions/RemoveFiles.java
##########
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import org.apache.iceberg.io.FileIO;
+
+/**
+ * An action that removes all files referenced by a table metadata file.
+ * <p>
+ * This action will irreversibly delete all reachable files such as data files, manifests,
+ * manifest lists and should be used to clean up the underlying storage once a table is dropped
+ * and no longer needed.
+ * <p>
+ * Implementations may use a query engine to distribute parts of work.
+ */
+public interface RemoveFiles extends Action<RemoveFiles, RemoveFiles.Result> {
+
+  /**
+   * Passes an alternative delete implementation that will be used for files.
+   *
+   * @param removeFunc a function that will be called to delete files.
+   *                   The function accepts path to file as an argument.
+   * @return this for method chaining
+   */
+  RemoveFiles removeWith(Consumer<String> removeFunc);
+
+  /**
+   * Passes an alternative executor service that will be used for files removal.
+   * <p>
+   * If this method is not called, files will still be deleted in the current thread.
+   * <p>
+   *
+   * @param executorService the service to use
+   * @return this for method chaining
+   */
+  RemoveFiles executeRemoveWith(ExecutorService executorService);

Review comment:
       Same here: `executeDeleteWith`

##########
File path: spark/src/main/java/org/apache/iceberg/spark/actions/BaseRemoveFilesSparkAction.java
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.spark.actions;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+import org.apache.iceberg.ReachableFileUtil;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableMetadata;
+import org.apache.iceberg.TableMetadataParser;
+import org.apache.iceberg.actions.BaseRemoveFilesActionResult;
+import org.apache.iceberg.actions.RemoveFiles;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.hadoop.HadoopFileIO;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.spark.JobGroupInfo;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.Tasks;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.functions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An implementation of {@link RemoveFiles} that uses metadata tables in Spark
+ * to determine which files should be deleted.
+ */
+@SuppressWarnings("UnnecessaryAnonymousClass")
+public class BaseRemoveFilesSparkAction
+    extends BaseSparkAction<RemoveFiles, RemoveFiles.Result> implements RemoveFiles {
+  private static final Logger LOG = LoggerFactory.getLogger(BaseRemoveFilesSparkAction.class);
+
+  private static final String DATA_FILE = "Data File";
+  private static final String MANIFEST = "Manifest";
+  private static final String MANIFEST_LIST = "Manifest List";
+  private static final String OTHERS = "Others";
+
+  private static final String STREAM_RESULTS = "stream-results";
+
+  // Creates an executor service that runs each task in the thread that invokes execute/submit.
+  private static final ExecutorService DEFAULT_DELETE_EXECUTOR_SERVICE = null;
+
+  private final String metadataLocation;
+  private final Consumer<String> defaultDelete = new Consumer<String>() {
+    @Override
+    public void accept(String file) {
+      io.deleteFile(file);
+    }
+  };
+
+  private Consumer<String> removeFunc = defaultDelete;
+  private ExecutorService removeExecutorService = DEFAULT_DELETE_EXECUTOR_SERVICE;
+  private FileIO io = new HadoopFileIO();
+
+  public BaseRemoveFilesSparkAction(SparkSession spark, String metadataLocation) {
+    super(spark);
+    this.metadataLocation = metadataLocation;
+  }
+
+  @Override
+  protected RemoveFiles self() {
+    return this;
+  }
+
+  @Override
+  public Result execute() {
+    if (io == null) {
+      throw new RuntimeException("IO needs to be set for removing the files");
+    }
+    String msg = String.format("Removing files reachable from %s", metadataLocation);
+    JobGroupInfo info = newJobGroupInfo("REMOVE-FILES", msg);
+    return withJobGroupInfo(info, this::doExecute);
+  }
+
+  private Result doExecute() {
+    boolean streamResults = PropertyUtil.propertyAsBoolean(options(), STREAM_RESULTS, false);
+    TableMetadata metadata = TableMetadataParser.read(io, metadataLocation);
+    Dataset<Row> validFileDF = buildValidFileDF(metadata).distinct();
+    if (streamResults) {
+      return deleteFiles(validFileDF.toLocalIterator());
+    } else {
+      return deleteFiles(validFileDF.collectAsList().iterator());
+    }
+  }
+
+  private Dataset<Row> projectFilePathWithType(Dataset<Row> ds, String type) {
+    return ds.select(functions.col("file_path"), functions.lit(type).as("file_type"));
+  }
+
+  private Dataset<Row> buildValidFileDF(TableMetadata metadata) {
+    Table staticTable = newStaticTable(metadata, io);
+    return projectFilePathWithType(buildValidDataFileDF(staticTable), DATA_FILE)
+        .union(projectFilePathWithType(buildManifestFileDF(staticTable), MANIFEST))
+        .union(projectFilePathWithType(buildManifestListDF(staticTable), MANIFEST_LIST))
+        .union(projectFilePathWithType(buildOtherMetadataFileDF(staticTable), OTHERS));
+  }
+
+  @Override
+  protected Dataset<Row> buildOtherMetadataFileDF(Table table) {
+    List<String> otherMetadataFiles = Lists.newArrayList();
+    otherMetadataFiles.addAll(ReachableFileUtil.metadataFileLocations(table, true));
+    // otherMetadataFiles.add(ReachableFileUtil.versionHintLocation(table));

Review comment:
       Why is it commented?

##########
File path: api/src/main/java/org/apache/iceberg/actions/ActionsProvider.java
##########
@@ -67,4 +67,11 @@ default RewriteDataFiles rewriteDataFiles(Table table) {
   default ExpireSnapshots expireSnapshots(Table table) {
     throw new UnsupportedOperationException(this.getClass().getName() + " does not implement expireSnapshots");
   }
+
+  /**
+   * Instantiates an action to remove all the files referenced by given metadata location.
+   */
+  default RemoveFiles removeFiles(String metadataLocation) {
+    throw new UnsupportedOperationException(this.getClass().getName() + " does not implement expireSnapshots");

Review comment:
       Looks like there is a typo: `expireSnapshots` -> `removeFiles` or whatever name we go with.

##########
File path: api/src/main/java/org/apache/iceberg/actions/RemoveFiles.java
##########
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import org.apache.iceberg.io.FileIO;
+
+/**
+ * An action that removes all files referenced by a table metadata file.
+ * <p>
+ * This action will irreversibly delete all reachable files such as data files, manifests,
+ * manifest lists and should be used to clean up the underlying storage once a table is dropped
+ * and no longer needed.
+ * <p>
+ * Implementations may use a query engine to distribute parts of work.
+ */
+public interface RemoveFiles extends Action<RemoveFiles, RemoveFiles.Result> {
+
+  /**
+   * Passes an alternative delete implementation that will be used for files.
+   *
+   * @param removeFunc a function that will be called to delete files.
+   *                   The function accepts path to file as an argument.
+   * @return this for method chaining
+   */
+  RemoveFiles removeWith(Consumer<String> removeFunc);
+
+  /**
+   * Passes an alternative executor service that will be used for files removal.
+   * <p>
+   * If this method is not called, files will still be deleted in the current thread.
+   * <p>

Review comment:
       nit: unnecessary `<p>`?

##########
File path: api/src/main/java/org/apache/iceberg/actions/RemoveFiles.java
##########
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import org.apache.iceberg.io.FileIO;
+
+/**
+ * An action that removes all files referenced by a table metadata file.
+ * <p>
+ * This action will irreversibly delete all reachable files such as data files, manifests,
+ * manifest lists and should be used to clean up the underlying storage once a table is dropped
+ * and no longer needed.
+ * <p>
+ * Implementations may use a query engine to distribute parts of work.
+ */
+public interface RemoveFiles extends Action<RemoveFiles, RemoveFiles.Result> {
+
+  /**
+   * Passes an alternative delete implementation that will be used for files.
+   *
+   * @param removeFunc a function that will be called to delete files.
+   *                   The function accepts path to file as an argument.
+   * @return this for method chaining
+   */
+  RemoveFiles removeWith(Consumer<String> removeFunc);

Review comment:
       There is nothing wrong with this name but we call it `deleteWith` in 2 other actions and in some places in the Table API. I think it is better to be consistent and call it `deleteWith` simply because we already use that name in other places and users are familiar with it.

##########
File path: spark3/src/test/java/org/apache/iceberg/actions/TestRemoveFilesAction.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.io.File;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.AssertHelpers;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TestHelpers;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.spark.SparkTestBase;
+import org.apache.iceberg.spark.actions.SparkActions;
+import org.apache.iceberg.types.Types;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.apache.iceberg.types.Types.NestedField.optional;
+
+public class TestRemoveFilesAction extends SparkTestBase {
+  private static final HadoopTables TABLES = new HadoopTables(new Configuration());
+  private static final Schema SCHEMA = new Schema(
+      optional(1, "c1", Types.IntegerType.get()),
+      optional(2, "c2", Types.StringType.get()),
+      optional(3, "c3", Types.StringType.get())
+  );
+  private static final int SHUFFLE_PARTITIONS = 2;
+
+  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("c1").build();
+
+  static final DataFile FILE_A = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-a.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(0))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_B = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-b.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(1))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_C = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-c.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(2))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_D = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-d.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(3))
+      .withRecordCount(1)
+      .build();
+
+  @Rule
+  public TemporaryFolder temp = new TemporaryFolder();
+
+  private Table table;
+
+  @Before
+  public void setupTableLocation() throws Exception {
+    File tableDir = temp.newFolder();
+    String tableLocation = tableDir.toURI().toString();
+    this.table = TABLES.create(SCHEMA, SPEC, Maps.newHashMap(), tableLocation);
+    spark.conf().set("spark.sql.shuffle.partitions", SHUFFLE_PARTITIONS);
+  }
+
+  private void checkRemoveFilesResults(long expectedDatafiles, long expectedManifestsDeleted,
+                                       long expectedManifestListsDeleted, long expectedOtherFilesDeleted,
+                                       RemoveFiles.Result results) {
+    Assert.assertEquals("Incorrect number of manifest files deleted",
+        expectedManifestsDeleted,  results.removedManifestsCount());
+    Assert.assertEquals("Incorrect number of datafiles deleted",
+        expectedDatafiles, results.removedDataFilesCount());
+    Assert.assertEquals("Incorrect number of manifest lists deleted",
+        expectedManifestListsDeleted, results.removedManifestListsCount());
+    Assert.assertEquals("Incorrect number of other lists deleted",
+        expectedOtherFilesDeleted - 1, results.otherRemovedFilesCount());
+  }
+
+  @Test
+  public void dataFilesCleanupWithParallelTasks() {
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_B), ImmutableSet.of(FILE_D))
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_A), ImmutableSet.of(FILE_C))
+        .commit();
+
+    Set<String> deletedFiles = Sets.newHashSet();
+    Set<String> deleteThreads = ConcurrentHashMap.newKeySet();
+    AtomicInteger deleteThreadsIndex = new AtomicInteger(0);
+
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .executeRemoveWith(Executors.newFixedThreadPool(4, runnable -> {
+          Thread thread = new Thread(runnable);
+          thread.setName("remove-files-" + deleteThreadsIndex.getAndIncrement());
+          thread.setDaemon(true); // daemon threads will be terminated abruptly when the JVM exits
+          return thread;
+        }))
+        .removeWith(s -> {
+          deleteThreads.add(Thread.currentThread().getName());
+          deletedFiles.add(s);
+        })
+        .execute();
+
+    // Verifies that the delete methods ran in the threads created by the provided ExecutorService ThreadFactory
+    Assert.assertEquals(deleteThreads,
+        Sets.newHashSet("remove-files-0", "remove-files-1", "remove-files-2", "remove-files-3"));
+
+    Assert.assertTrue("FILE_A should be deleted", deletedFiles.contains(FILE_A.path().toString()));
+    Assert.assertTrue("FILE_B should be deleted", deletedFiles.contains(FILE_B.path().toString()));
+
+    checkRemoveFilesResults(4L, 6L, 4L, 6, result);
+  }
+
+  @Test
+  public void testWithExpiringDanglingStageCommit() {
+    table.location();
+    // `A` commit
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    // `B` staged commit
+    table.newAppend()
+        .appendFile(FILE_B)
+        .stageOnly()
+        .commit();
+
+    // `C` commit
+    table.newAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .execute();
+
+    checkRemoveFilesResults(3L, 3L, 3L, 5, result);
+  }
+
+  @Test
+  public void testRemoveFileActionOnEmptyTable() {
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .execute();
+
+    checkRemoveFilesResults(0, 0, 0, 2, result);
+  }
+
+  @Test
+  public void testRemoveFilesActionWithReducedVersionsTable() {
+    table.updateProperties()
+        .set(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "2").commit();
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_D)
+        .commit();
+
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result result = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(4, 5, 5, 8, result);
+  }
+
+  @Test
+  public void testRemoveFilesAction() {
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result result = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(2, 2, 2, 4,  result);
+  }
+
+  @Test
+  public void testUseLocalIterator() {
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newOverwrite()
+        .deleteFile(FILE_A)
+        .addFile(FILE_B)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    int jobsBefore = spark.sparkContext().dagScheduler().nextJobId().get();
+
+    RemoveFiles.Result results = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .option("stream-results", "true").execute();
+
+    int jobsAfter = spark.sparkContext().dagScheduler().nextJobId().get();
+    int totalJobsRun = jobsAfter - jobsBefore;
+
+    checkRemoveFilesResults(3L, 4L, 3L, 5, results);
+
+    Assert.assertEquals(
+        "Expected total jobs to be equal to total number of shuffle partitions", totalJobsRun, SHUFFLE_PARTITIONS);
+  }
+
+  @Test
+  public void testIgnoreMetadataFilesNotFound() {
+    table.updateProperties()
+        .set(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "1").commit();
+
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+    // There are three metadata json files at this point
+    List<String> result = Actions.forTable(table)
+        .removeOrphanFiles()
+        .olderThan(System.currentTimeMillis()).execute();
+
+    Assert.assertEquals("Should delete 1 file", 1, result.size());
+    Assert.assertTrue("Should remove v1 file", result.get(0).contains("v1.metadata.json"));
+
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result res = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(1, 1, 1, 4,  res);
+  }
+
+  @Test
+  public void testEmptyIOThrowsException() {
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(null);
+    AssertHelpers.assertThrows("FileIO needs to be set to use RemoveFiles action",
+        RuntimeException.class, "IO needs to be set for removing the files",
+        baseRemoveFilesSparkAction::execute);
+  }
+
+  private String getMetadataLocation(Table tbl) {

Review comment:
       nit: just `metadataLocation`

##########
File path: spark3/src/test/java/org/apache/iceberg/actions/TestRemoveFilesAction.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.io.File;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.AssertHelpers;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TestHelpers;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.spark.SparkTestBase;
+import org.apache.iceberg.spark.actions.SparkActions;
+import org.apache.iceberg.types.Types;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.apache.iceberg.types.Types.NestedField.optional;
+
+public class TestRemoveFilesAction extends SparkTestBase {
+  private static final HadoopTables TABLES = new HadoopTables(new Configuration());
+  private static final Schema SCHEMA = new Schema(
+      optional(1, "c1", Types.IntegerType.get()),
+      optional(2, "c2", Types.StringType.get()),
+      optional(3, "c3", Types.StringType.get())
+  );
+  private static final int SHUFFLE_PARTITIONS = 2;
+
+  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("c1").build();
+
+  static final DataFile FILE_A = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-a.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(0))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_B = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-b.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(1))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_C = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-c.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(2))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_D = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-d.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(3))
+      .withRecordCount(1)
+      .build();
+
+  @Rule
+  public TemporaryFolder temp = new TemporaryFolder();
+
+  private Table table;
+
+  @Before
+  public void setupTableLocation() throws Exception {
+    File tableDir = temp.newFolder();
+    String tableLocation = tableDir.toURI().toString();
+    this.table = TABLES.create(SCHEMA, SPEC, Maps.newHashMap(), tableLocation);
+    spark.conf().set("spark.sql.shuffle.partitions", SHUFFLE_PARTITIONS);
+  }
+
+  private void checkRemoveFilesResults(long expectedDatafiles, long expectedManifestsDeleted,
+                                       long expectedManifestListsDeleted, long expectedOtherFilesDeleted,
+                                       RemoveFiles.Result results) {
+    Assert.assertEquals("Incorrect number of manifest files deleted",
+        expectedManifestsDeleted,  results.removedManifestsCount());
+    Assert.assertEquals("Incorrect number of datafiles deleted",
+        expectedDatafiles, results.removedDataFilesCount());
+    Assert.assertEquals("Incorrect number of manifest lists deleted",
+        expectedManifestListsDeleted, results.removedManifestListsCount());
+    Assert.assertEquals("Incorrect number of other lists deleted",
+        expectedOtherFilesDeleted - 1, results.otherRemovedFilesCount());
+  }
+
+  @Test
+  public void dataFilesCleanupWithParallelTasks() {
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_B), ImmutableSet.of(FILE_D))
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_A), ImmutableSet.of(FILE_C))
+        .commit();
+
+    Set<String> deletedFiles = Sets.newHashSet();
+    Set<String> deleteThreads = ConcurrentHashMap.newKeySet();
+    AtomicInteger deleteThreadsIndex = new AtomicInteger(0);
+
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .executeRemoveWith(Executors.newFixedThreadPool(4, runnable -> {
+          Thread thread = new Thread(runnable);
+          thread.setName("remove-files-" + deleteThreadsIndex.getAndIncrement());
+          thread.setDaemon(true); // daemon threads will be terminated abruptly when the JVM exits
+          return thread;
+        }))
+        .removeWith(s -> {
+          deleteThreads.add(Thread.currentThread().getName());
+          deletedFiles.add(s);
+        })
+        .execute();
+
+    // Verifies that the delete methods ran in the threads created by the provided ExecutorService ThreadFactory
+    Assert.assertEquals(deleteThreads,
+        Sets.newHashSet("remove-files-0", "remove-files-1", "remove-files-2", "remove-files-3"));
+
+    Assert.assertTrue("FILE_A should be deleted", deletedFiles.contains(FILE_A.path().toString()));
+    Assert.assertTrue("FILE_B should be deleted", deletedFiles.contains(FILE_B.path().toString()));
+
+    checkRemoveFilesResults(4L, 6L, 4L, 6, result);
+  }
+
+  @Test
+  public void testWithExpiringDanglingStageCommit() {
+    table.location();
+    // `A` commit
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    // `B` staged commit
+    table.newAppend()
+        .appendFile(FILE_B)
+        .stageOnly()
+        .commit();
+
+    // `C` commit
+    table.newAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .execute();
+
+    checkRemoveFilesResults(3L, 3L, 3L, 5, result);
+  }
+
+  @Test
+  public void testRemoveFileActionOnEmptyTable() {
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .execute();
+
+    checkRemoveFilesResults(0, 0, 0, 2, result);
+  }
+
+  @Test
+  public void testRemoveFilesActionWithReducedVersionsTable() {
+    table.updateProperties()
+        .set(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "2").commit();
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_D)
+        .commit();
+
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result result = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(4, 5, 5, 8, result);
+  }
+
+  @Test
+  public void testRemoveFilesAction() {
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result result = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(2, 2, 2, 4,  result);
+  }
+
+  @Test
+  public void testUseLocalIterator() {
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newOverwrite()
+        .deleteFile(FILE_A)
+        .addFile(FILE_B)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    int jobsBefore = spark.sparkContext().dagScheduler().nextJobId().get();
+
+    RemoveFiles.Result results = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .option("stream-results", "true").execute();
+
+    int jobsAfter = spark.sparkContext().dagScheduler().nextJobId().get();
+    int totalJobsRun = jobsAfter - jobsBefore;
+
+    checkRemoveFilesResults(3L, 4L, 3L, 5, results);
+
+    Assert.assertEquals(
+        "Expected total jobs to be equal to total number of shuffle partitions", totalJobsRun, SHUFFLE_PARTITIONS);
+  }
+
+  @Test
+  public void testIgnoreMetadataFilesNotFound() {
+    table.updateProperties()
+        .set(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "1").commit();
+
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+    // There are three metadata json files at this point
+    List<String> result = Actions.forTable(table)
+        .removeOrphanFiles()
+        .olderThan(System.currentTimeMillis()).execute();
+
+    Assert.assertEquals("Should delete 1 file", 1, result.size());
+    Assert.assertTrue("Should remove v1 file", result.get(0).contains("v1.metadata.json"));
+
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result res = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(1, 1, 1, 4,  res);
+  }
+
+  @Test
+  public void testEmptyIOThrowsException() {
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(null);
+    AssertHelpers.assertThrows("FileIO needs to be set to use RemoveFiles action",
+        RuntimeException.class, "IO needs to be set for removing the files",
+        baseRemoveFilesSparkAction::execute);
+  }
+
+  private String getMetadataLocation(Table tbl) {
+    return  ((HasTableOperations) tbl).operations().current().metadataFileLocation();
+  }
+
+  SparkActions getSparkActions() {

Review comment:
       nit: just `sparkActions`

##########
File path: spark3/src/main/java/org/apache/iceberg/spark/actions/SparkActions.java
##########
@@ -62,4 +63,9 @@ public MigrateTable migrateTable(String tableIdent) {
     CatalogAndIdentifier catalogAndIdent = Spark3Util.catalogAndIdentifier(ctx, spark(), tableIdent, defaultCatalog);
     return new BaseMigrateTableSparkAction(spark(), catalogAndIdent.catalog(), catalogAndIdent.identifier());
   }
+
+  @Override
+  public RemoveFiles removeFiles(String metadataLocation) {
+    return new BaseRemoveFilesSparkAction(spark(), metadataLocation);

Review comment:
       Isn't it enough to just override it in `BaseSparkActions`?

##########
File path: spark3/src/test/java/org/apache/iceberg/actions/TestRemoveFilesAction.java
##########
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.io.File;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.iceberg.AssertHelpers;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.HasTableOperations;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TestHelpers;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.spark.SparkTestBase;
+import org.apache.iceberg.spark.actions.SparkActions;
+import org.apache.iceberg.types.Types;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.apache.iceberg.types.Types.NestedField.optional;
+
+public class TestRemoveFilesAction extends SparkTestBase {
+  // Shared HadoopTables catalog used to create the per-test table in a temp folder.
+  private static final HadoopTables TABLES = new HadoopTables(new Configuration());
+  private static final Schema SCHEMA = new Schema(
+      optional(1, "c1", Types.IntegerType.get()),
+      optional(2, "c2", Types.StringType.get()),
+      optional(3, "c3", Types.StringType.get())
+  );
+  // Kept small so job-count assertions (see testUseLocalIterator) stay cheap and deterministic.
+  private static final int SHUFFLE_PARTITIONS = 2;
+
+  // Table is identity-partitioned on c1; each FILE_X below lands in its own partition.
+  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("c1").build();
+
+  // NOTE(review): these paths do not exist on disk; deletes are observed via removeWith
+  // callbacks or result counts, never by touching the filesystem.
+  static final DataFile FILE_A = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-a.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(0))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_B = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-b.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(1))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_C = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-c.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(2))
+      .withRecordCount(1)
+      .build();
+  static final DataFile FILE_D = DataFiles.builder(SPEC)
+      .withPath("/path/to/data-d.parquet")
+      .withFileSizeInBytes(10)
+      .withPartition(TestHelpers.Row.of(3))
+      .withRecordCount(1)
+      .build();
+
+  // Fresh temp directory per test; JUnit deletes it afterwards.
+  @Rule
+  public TemporaryFolder temp = new TemporaryFolder();
+
+  // Table under test, recreated in setupTableLocation() before each test.
+  private Table table;
+
+  // Creates an empty partitioned table in a fresh temp folder and pins the shuffle
+  // partition count so Spark job counts are predictable across tests.
+  @Before
+  public void setupTableLocation() throws Exception {
+    File tableDir = temp.newFolder();
+    String tableLocation = tableDir.toURI().toString();
+    this.table = TABLES.create(SCHEMA, SPEC, Maps.newHashMap(), tableLocation);
+    spark.conf().set("spark.sql.shuffle.partitions", SHUFFLE_PARTITIONS);
+  }
+
+  /**
+   * Asserts the per-category removal counts reported by a RemoveFiles action.
+   *
+   * @param expectedDatafiles expected number of removed data files
+   * @param expectedManifestsDeleted expected number of removed manifests
+   * @param expectedManifestListsDeleted expected number of removed manifest lists
+   * @param expectedOtherFilesDeleted expected number of other (metadata) files, including the
+   *                                  current metadata file itself
+   * @param results the action result to check
+   */
+  private void checkRemoveFilesResults(long expectedDatafiles, long expectedManifestsDeleted,
+                                       long expectedManifestListsDeleted, long expectedOtherFilesDeleted,
+                                       RemoveFiles.Result results) {
+    Assert.assertEquals("Incorrect number of manifest files deleted",
+        expectedManifestsDeleted, results.removedManifestsCount());
+    Assert.assertEquals("Incorrect number of datafiles deleted",
+        expectedDatafiles, results.removedDataFilesCount());
+    Assert.assertEquals("Incorrect number of manifest lists deleted",
+        expectedManifestListsDeleted, results.removedManifestListsCount());
+    // NOTE(review): the `- 1` presumably accounts for the entry metadata file not being
+    // reported in otherRemovedFilesCount() — confirm against BaseRemoveFilesSparkAction.
+    // Also fixed: message said "other lists" instead of "other files".
+    Assert.assertEquals("Incorrect number of other files deleted",
+        expectedOtherFilesDeleted - 1, results.otherRemovedFilesCount());
+  }
+
+  @Test
+  public void dataFilesCleanupWithParallelTasks() {
+    // Build a table where A and B were rewritten into D and C, so all four files plus the
+    // rewrite history must be removed.
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_B), ImmutableSet.of(FILE_D))
+        .commit();
+
+    table.newRewrite()
+        .rewriteFiles(ImmutableSet.of(FILE_A), ImmutableSet.of(FILE_C))
+        .commit();
+
+    // Fixed: deletedFiles is mutated concurrently from the executor's worker threads, so it
+    // must be a concurrent set like deleteThreads (was a plain HashSet — a data race).
+    Set<String> deletedFiles = ConcurrentHashMap.newKeySet();
+    Set<String> deleteThreads = ConcurrentHashMap.newKeySet();
+    AtomicInteger deleteThreadsIndex = new AtomicInteger(0);
+
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .executeRemoveWith(Executors.newFixedThreadPool(4, runnable -> {
+          Thread thread = new Thread(runnable);
+          thread.setName("remove-files-" + deleteThreadsIndex.getAndIncrement());
+          thread.setDaemon(true); // daemon threads will be terminated abruptly when the JVM exits
+          return thread;
+        }))
+        .removeWith(s -> {
+          deleteThreads.add(Thread.currentThread().getName());
+          deletedFiles.add(s);
+        })
+        .execute();
+
+    // Verifies that the delete methods ran in the threads created by the provided ExecutorService
+    // ThreadFactory. Fixed: JUnit expects the expected value first, then the actual one.
+    Assert.assertEquals(
+        Sets.newHashSet("remove-files-0", "remove-files-1", "remove-files-2", "remove-files-3"),
+        deleteThreads);
+
+    Assert.assertTrue("FILE_A should be deleted", deletedFiles.contains(FILE_A.path().toString()))
;
+    Assert.assertTrue("FILE_B should be deleted", deletedFiles.contains(FILE_B.path().toString()));
+
+    checkRemoveFilesResults(4L, 6L, 4L, 6, result);
+  }
+
+  @Test
+  public void testWithExpiringDanglingStageCommit() {
+    // Fixed: removed dead statement `table.location();` whose result was discarded.
+    // `A` commit
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    // `B` staged commit — a dangling snapshot not reachable from the current table state
+    table.newAppend()
+        .appendFile(FILE_B)
+        .stageOnly()
+        .commit();
+
+    // `C` commit
+    table.newAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    // The staged snapshot's files must be removed along with everything else.
+    RemoveFiles.Result result = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .execute();
+
+    checkRemoveFilesResults(3L, 3L, 3L, 5, result);
+  }
+
+  @Test
+  public void testRemoveFileActionOnEmptyTable() {
+    // No snapshots exist yet, so only the table's metadata files are reported.
+    RemoveFiles action = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+
+    checkRemoveFilesResults(0, 0, 0, 2, action.execute());
+  }
+
+  // With METADATA_PREVIOUS_VERSIONS_MAX=2, older metadata versions are dropped as commits
+  // proceed; the action must still remove everything reachable from the entry metadata file.
+  @Test
+  public void testRemoveFilesActionWithReducedVersionsTable() {
+    table.updateProperties()
+        .set(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "2").commit();
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    // FILE_B appended twice on purpose: the data file count below (4) deduplicates paths.
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_D)
+        .commit();
+
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result result = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(4, 5, 5, 8, result);
+  }
+
+  // Baseline: two appends produce two snapshots, so two manifests and two manifest lists.
+  @Test
+  public void testRemoveFilesAction() {
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newAppend()
+        .appendFile(FILE_B)
+        .commit();
+
+    RemoveFiles.Result result = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .execute();
+
+    checkRemoveFilesResults(2, 2, 2, 4, result);
+  }
+
+  @Test
+  public void testUseLocalIterator() {
+    table.newFastAppend()
+        .appendFile(FILE_A)
+        .commit();
+
+    table.newOverwrite()
+        .deleteFile(FILE_A)
+        .addFile(FILE_B)
+        .commit();
+
+    table.newFastAppend()
+        .appendFile(FILE_C)
+        .commit();
+
+    int jobsBefore = spark.sparkContext().dagScheduler().nextJobId().get();
+
+    // stream-results consumes the file listing via a local iterator, which launches one Spark
+    // job per shuffle partition instead of collecting everything in a single job.
+    RemoveFiles.Result results = getSparkActions().removeFiles(getMetadataLocation(table))
+        .io(table.io())
+        .option("stream-results", "true").execute();
+
+    int jobsAfter = spark.sparkContext().dagScheduler().nextJobId().get();
+    int totalJobsRun = jobsAfter - jobsBefore;
+
+    checkRemoveFilesResults(3L, 4L, 3L, 5, results);
+
+    // Fixed: JUnit expects the expected value before the actual one.
+    Assert.assertEquals(
+        "Expected total jobs to be equal to total number of shuffle partitions", SHUFFLE_PARTITIONS, totalJobsRun);
+  }
+
+  // Removes v1.metadata.json via the orphan-file action first, then verifies that RemoveFiles
+  // does not fail when a previously-tracked metadata file is already gone.
+  @Test
+  public void testIgnoreMetadataFilesNotFound() {
+    table.updateProperties()
+        .set(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "1").commit();
+
+    table.newAppend()
+        .appendFile(FILE_A)
+        .commit();
+    // There are three metadata json files at this point
+    List<String> result = Actions.forTable(table)
+        .removeOrphanFiles()
+        .olderThan(System.currentTimeMillis()).execute();
+
+    Assert.assertEquals("Should delete 1 file", 1, result.size());
+    Assert.assertTrue("Should remove v1 file", result.get(0).contains("v1.metadata.json"));
+
+    // The missing v1 file must be skipped silently, not cause the action to throw.
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(table.io());
+    RemoveFiles.Result res = baseRemoveFilesSparkAction.execute();
+
+    checkRemoveFilesResults(1, 1, 1, 4,  res);
+  }
+
+  // Executing the action without a FileIO configured must fail with a clear message.
+  @Test
+  public void testEmptyIOThrowsException() {
+    RemoveFiles baseRemoveFilesSparkAction = getSparkActions()
+        .removeFiles(getMetadataLocation(table))
+        .io(null);
+    AssertHelpers.assertThrows("FileIO needs to be set to use RemoveFiles action",
+        RuntimeException.class, "IO needs to be set for removing the files",
+        baseRemoveFilesSparkAction::execute);
+  }
+
+  private String getMetadataLocation(Table tbl) {
+    return  ((HasTableOperations) tbl).operations().current().metadataFileLocation();

Review comment:
       nit: extra space after `return`.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org
For additional commands, e-mail: issues-help@iceberg.apache.org