Posted to commits@iceberg.apache.org by bl...@apache.org on 2022/11/06 21:06:53 UTC

[iceberg] branch master updated: Spark 3.0: Remove 3.0 from docs and builds (#6093)

This is an automated email from the ASF dual-hosted git repository.

blue pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new 396c6be686 Spark 3.0: Remove 3.0 from docs and builds (#6093)
396c6be686 is described below

commit 396c6be6869176edf7bf4c41e50feb35d660d5c1
Author: Ajantha Bhat <aj...@gmail.com>
AuthorDate: Mon Nov 7 02:36:47 2022 +0530

    Spark 3.0: Remove 3.0 from docs and builds (#6093)
---
 .github/labeler.yml                                |  1 -
 .github/workflows/java-ci.yml                      |  2 +-
 .github/workflows/publish-snapshot.yml             |  2 +-
 .github/workflows/spark-ci.yml                     |  2 +-
 .gitignore                                         |  1 -
 README.md                                          |  3 +--
 dev/stage-binaries.sh                              |  2 +-
 docs/aws.md                                        | 20 +++++++--------
 docs/java-api.md                                   |  5 +---
 docs/nessie.md                                     |  8 +++---
 docs/spark-configuration.md                        |  4 +--
 docs/spark-ddl.md                                  |  6 ++---
 docs/spark-procedures.md                           |  2 +-
 docs/spark-queries.md                              |  6 ++---
 docs/spark-structured-streaming.md                 |  8 +++---
 docs/spark-writes.md                               | 30 ++++++++--------------
 gradle.properties                                  |  2 +-
 jmh.gradle                                         |  4 ---
 settings.gradle                                    | 12 ---------
 spark/build.gradle                                 |  4 ---
 .../java/org/apache/iceberg/actions/Actions.java   |  8 +++---
 .../iceberg/actions/RewriteDataFilesAction.java    |  2 +-
 .../org/apache/iceberg/actions/SparkActions.java   |  2 +-
 .../java/org/apache/iceberg/examples/README.md     |  2 +-
 .../iceberg/spark/source/TestDataFrameWrites.java  |  4 +--
 .../iceberg/spark/source/TestSparkDataWrite.java   |  4 +--
 .../iceberg/spark/source/TestDataFrameWrites.java  |  4 +--
 .../iceberg/spark/source/TestSparkDataWrite.java   |  4 +--
 .../iceberg/spark/source/TestDataFrameWrites.java  |  4 +--
 .../iceberg/spark/source/TestSparkDataWrite.java   |  4 +--
 .../iceberg/spark/source/TestDataFrameWrites.java  |  4 +--
 .../iceberg/spark/source/TestSparkDataWrite.java   |  4 +--
 32 files changed, 68 insertions(+), 102 deletions(-)

diff --git a/.github/labeler.yml b/.github/labeler.yml
index 55fd64b479..521e1a42aa 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -61,7 +61,6 @@ DATA:
   - data/**/*
 SPARK:
   - spark-runtime/**/*
-  - spark3-runtime/**/*
   - spark/**/*
   - spark2/**/*
   - spark3/**/*
diff --git a/.github/workflows/java-ci.yml b/.github/workflows/java-ci.yml
index 7cf2d8abe3..e7e912e751 100644
--- a/.github/workflows/java-ci.yml
+++ b/.github/workflows/java-ci.yml
@@ -88,7 +88,7 @@ jobs:
       with:
         distribution: zulu
         java-version: 8
-    - run: ./gradlew -DflinkVersions=1.14,1.15,1.16 -DsparkVersions=2.4,3.0,3.1,3.2,3.3 -DhiveVersions=2,3 build -x test -x javadoc -x integrationTest
+    - run: ./gradlew -DflinkVersions=1.14,1.15,1.16 -DsparkVersions=2.4,3.1,3.2,3.3 -DhiveVersions=2,3 build -x test -x javadoc -x integrationTest
 
   build-javadoc:
     runs-on: ubuntu-20.04
diff --git a/.github/workflows/publish-snapshot.yml b/.github/workflows/publish-snapshot.yml
index b9b311c5be..b6a5771127 100644
--- a/.github/workflows/publish-snapshot.yml
+++ b/.github/workflows/publish-snapshot.yml
@@ -40,5 +40,5 @@ jobs:
           java-version: 8
       - run: |
           ./gradlew printVersion
-          ./gradlew -DflinkVersions=1.14,1.15,1.16 -DsparkVersions=2.4,3.0,3.1,3.2,3.3 -DhiveVersions=2,3 publishApachePublicationToMavenRepository -PmavenUser=${{ secrets.NEXUS_USER }} -PmavenPassword=${{ secrets.NEXUS_PW }}
+          ./gradlew -DflinkVersions=1.14,1.15,1.16 -DsparkVersions=2.4,3.1,3.2,3.3 -DhiveVersions=2,3 publishApachePublicationToMavenRepository -PmavenUser=${{ secrets.NEXUS_USER }} -PmavenPassword=${{ secrets.NEXUS_PW }}
           ./gradlew -DflinkVersions= -DsparkVersions=3.2,3.3 -DscalaVersion=2.13 -DhiveVersions= publishApachePublicationToMavenRepository -PmavenUser=${{ secrets.NEXUS_USER }} -PmavenPassword=${{ secrets.NEXUS_PW }}
diff --git a/.github/workflows/spark-ci.yml b/.github/workflows/spark-ci.yml
index f0a45908e3..eb8abb6103 100644
--- a/.github/workflows/spark-ci.yml
+++ b/.github/workflows/spark-ci.yml
@@ -87,7 +87,7 @@ jobs:
     strategy:
       matrix:
         jvm: [8, 11]
-        spark: ['3.0', '3.1', '3.2', '3.3']
+        spark: ['3.1', '3.2', '3.3']
     env:
       SPARK_LOCAL_IP: localhost
     steps:
diff --git a/.gitignore b/.gitignore
index 4aafb18e52..b9c93a1bc4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,7 +29,6 @@ site/site
 
 # benchmark output
 spark/v2.4/spark/benchmark/*
-spark/v3.0/spark/benchmark/*
 spark/v3.1/spark/benchmark/*
 spark/v3.2/spark/benchmark/*
 spark/v3.3/spark/benchmark/*
diff --git a/README.md b/README.md
index 6ae3cd8bea..6ad7222753 100644
--- a/README.md
+++ b/README.md
@@ -74,8 +74,7 @@ Iceberg table support is organized in library modules:
 
 Iceberg also has modules for adding Iceberg support to processing engines:
 
-* `iceberg-spark2` is an implementation of Spark's Datasource V2 API in 2.4 for Iceberg (use iceberg-spark-runtime for a shaded version)
-* `iceberg-spark3` is an implementation of Spark's Datasource V2 API in 3.0 for Iceberg (use iceberg-spark3-runtime for a shaded version)
+* `iceberg-spark` is an implementation of Spark's Datasource V2 API for Iceberg, with submodules for each Spark version (use the runtime jars for a shaded version)
 * `iceberg-flink` contains classes for integrating with Apache Flink (use iceberg-flink-runtime for a shaded version)
 * `iceberg-mr` contains an InputFormat and other classes for integrating with Apache Hive
 * `iceberg-pig` is an implementation of Pig's LoadFunc API for Iceberg
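
With the Spark 3.0 build removed, the runtime jars referenced in the list above are published per Spark version. A minimal sketch of pulling one in for Spark 3.3 with Scala 2.12 (the version placeholder is illustrative, not part of this patch):

    ICEBERG_VERSION=<iceberg-version>
    spark-shell --packages org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:$ICEBERG_VERSION
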
diff --git a/dev/stage-binaries.sh b/dev/stage-binaries.sh
index 200de547a4..3c1290b2d8 100755
--- a/dev/stage-binaries.sh
+++ b/dev/stage-binaries.sh
@@ -20,7 +20,7 @@
 
 SCALA_VERSION=2.12
 FLINK_VERSIONS=1.14,1.15,1.16
-SPARK_VERSIONS=2.4,3.0,3.1,3.2,3.3
+SPARK_VERSIONS=2.4,3.1,3.2,3.3
 HIVE_VERSIONS=2,3
 
 ./gradlew -Prelease -DscalaVersion=$SCALA_VERSION -DflinkVersions=$FLINK_VERSIONS -DsparkVersions=$SPARK_VERSIONS -DhiveVersions=$HIVE_VERSIONS publishApachePublicationToMavenRepository
diff --git a/docs/aws.md b/docs/aws.md
index a042137303..1e0b687a22 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -48,12 +48,12 @@ Here are some examples.
 
 ### Spark
 
-For example, to use AWS features with Spark 3.0 and AWS clients version 2.17.257, you can start the Spark SQL shell with:
+For example, to use AWS features with Spark 3.3 (with Scala 2.12) and AWS clients version 2.17.257, you can start the Spark SQL shell with:
 
 ```sh
 # add Iceberg dependency
 ICEBERG_VERSION={{% icebergVersion %}}
-DEPENDENCIES="org.apache.iceberg:iceberg-spark3-runtime:$ICEBERG_VERSION"
+DEPENDENCIES="org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:$ICEBERG_VERSION"
 
 # add AWS dependency
 AWS_SDK_VERSION=2.17.257
@@ -435,7 +435,7 @@ This is turned off by default.
 ### S3 Tags
 
 Custom [tags](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) can be added to S3 objects while writing and deleting.
-For example, to write S3 tags with Spark 3.0, you can start the Spark SQL shell with:
+For example, to write S3 tags with Spark 3.3, you can start the Spark SQL shell with:
 ```
 spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
     --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket/my/key/prefix \
@@ -452,7 +452,7 @@ The property is set to `true` by default.
 
 With the `s3.delete.tags` config, objects are tagged with the configured key-value pairs before deletion.
 Users can configure tag-based object lifecycle policy at bucket level to transition objects to different tiers.
-For example, to add S3 delete tags with Spark 3.0, you can start the Spark SQL shell with: 
+For example, to add S3 delete tags with Spark 3.3, you can start the Spark SQL shell with: 
 
 ```
 sh spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
@@ -468,7 +468,7 @@ Users can also use the catalog property `s3.delete.num-threads` to mention the n
 
 When the catalog properties `s3.write.table-tag-enabled` and `s3.write.namespace-tag-enabled` are set to `true`, the objects in S3 will be saved with tags: `iceberg.table=<table-name>` and `iceberg.namespace=<namespace-name>`.
 Users can define access and data retention policy per namespace or table based on these tags.
-For example, to write table and namespace name as S3 tags with Spark 3.0, you can start the Spark SQL shell with:
+For example, to write table and namespace name as S3 tags with Spark 3.3, you can start the Spark SQL shell with:
 ```
 sh spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
     --conf spark.sql.catalog.my_catalog.warehouse=s3://iceberg-warehouse/s3-tagging \
@@ -488,7 +488,7 @@ disaster recovery, etc.
 For using cross-region access points, we need to additionally set `use-arn-region-enabled` catalog property to
 `true` to enable `S3FileIO` to make cross-region calls, it's not required for same / multi-region access points.
 
-For example, to use S3 access-point with Spark 3.0, you can start the Spark SQL shell with:
+For example, to use S3 access-point with Spark 3.3, you can start the Spark SQL shell with:
 ```
 spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
     --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket2/my/key/prefix \
@@ -509,7 +509,7 @@ For more details on using access-points, please refer [Using access points with
 
 To use S3 Acceleration, we need to set `s3.acceleration-enabled` catalog property to `true` to enable `S3FileIO` to make accelerated S3 calls.
 
-For example, to use S3 Acceleration with Spark 3.0, you can start the Spark SQL shell with:
+For example, to use S3 Acceleration with Spark 3.3, you can start the Spark SQL shell with:
 ```
 spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
     --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket2/my/key/prefix \
@@ -527,7 +527,7 @@ When clients make a request to a dual-stack endpoint, the bucket URL resolves to
 
 To use S3 Dual-stack, we need to set `s3.dualstack-enabled` catalog property to `true` to enable `S3FileIO` to make dual-stack S3 calls.
 
-For example, to use S3 Dual-stack with Spark 3.0, you can start the Spark SQL shell with:
+For example, to use S3 Dual-stack with Spark 3.3, you can start the Spark SQL shell with:
 ```
 spark-sql --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
     --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket2/my/key/prefix \
@@ -564,7 +564,7 @@ The Glue, S3 and DynamoDB clients are then initialized with the assume-role cred
 Here is an example to start Spark shell with this client factory:
 
 ```shell
-spark-sql --packages org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}},software.amazon.awssdk:bundle:2.17.257 \
+spark-sql --packages org.apache.iceberg:iceberg-spark-runtime:{{% icebergVersion %}},software.amazon.awssdk:bundle:2.17.257 \
     --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
     --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket/my/key/prefix \    
     --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
@@ -658,7 +658,7 @@ AWS_PACKAGES=(
 )
 
 ICEBERG_PACKAGES=(
-  "iceberg-spark3-runtime"
+  "iceberg-spark-runtime-3.3_2.12"
   "iceberg-flink-runtime"
 )
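
For context on the aws.md changes above, a minimal sketch of the resulting Spark SQL invocation with the renamed runtime artifact and the AWS bundle (the catalog name, bucket, and version placeholder are illustrative; the conf keys mirror the examples in the hunks above):

    ICEBERG_VERSION=<iceberg-version>
    AWS_SDK_VERSION=2.17.257
    spark-sql --packages "org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:$ICEBERG_VERSION,software.amazon.awssdk:bundle:$AWS_SDK_VERSION" \
        --conf spark.sql.catalog.my_catalog=org.apache.iceberg.spark.SparkCatalog \
        --conf spark.sql.catalog.my_catalog.warehouse=s3://my-bucket/my/key/prefix \
        --conf spark.sql.catalog.my_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog \
        --conf spark.sql.catalog.my_catalog.io-impl=org.apache.iceberg.aws.s3.S3FileIO
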
 
diff --git a/docs/java-api.md b/docs/java-api.md
index 7ba9dfd1a5..15731e41df 100644
--- a/docs/java-api.md
+++ b/docs/java-api.md
@@ -252,10 +252,7 @@ Iceberg table support is organized in library modules:
 
 The Iceberg project also has modules for adding Iceberg support to processing engines and associated tooling:
 
-* `iceberg-spark2` is an implementation of Spark's Datasource V2 API in 2.4 for Iceberg (use iceberg-spark-runtime for a shaded version)
-* `iceberg-spark3` is an implementation of Spark's Datasource V2 API in 3.0 for Iceberg (use iceberg-spark3-runtime for a shaded version)
-* `iceberg-spark-3.1` is an implementation of Spark's Datasource V2 API in 3.1 for Iceberg (use iceberg-spark-runtime-3.1 for a shaded version)
-* `iceberg-spark-3.2` is an implementation of Spark's Datasource V2 API in 3.2 for Iceberg (use iceberg-spark-runtime-3.2 for a shaded version)
+* `iceberg-spark` is an implementation of Spark's Datasource V2 API for Iceberg, with submodules for each Spark version (use the runtime jars for a shaded version)
 * `iceberg-flink` is an implementation of Flink's Table and DataStream API for Iceberg (use iceberg-flink-runtime for a shaded version)
 * `iceberg-hive3` is an implementation of Hive 3 specific SerDe's for Timestamp, TimestampWithZone, and Date object inspectors (use iceberg-hive-runtime for a shaded version).
 * `iceberg-mr` is an implementation of MapReduce and Hive InputFormats and SerDes for Iceberg (use iceberg-hive-runtime for a shaded version for use with Hive)
diff --git a/docs/nessie.md b/docs/nessie.md
index 437ba8f486..a2846356e7 100644
--- a/docs/nessie.md
+++ b/docs/nessie.md
@@ -38,16 +38,16 @@ See [Project Nessie](https://projectnessie.org) for more information on Nessie.
 ## Enabling Nessie Catalog
 
 The `iceberg-nessie` module is bundled with Spark and Flink runtimes for all versions from `0.11.0`. To get started
-with Nessie and Iceberg simply add the Iceberg runtime to your process. Eg: `spark-sql --packages
-org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}}`. 
+with Nessie (with Spark 3.3) and Iceberg, simply add the Iceberg runtime to your process. E.g.: `spark-sql --packages
+org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:{{% icebergVersion %}}`. 
 
 ## Spark SQL Extensions
 
-From Spark 3.0, Nessie SQL extensions can be used to manage the Nessie repo as shown below. 
+From Spark 3.3 (with Scala 2.12), Nessie SQL extensions can be used to manage the Nessie repo as shown below.
 
 ```
 bin/spark-sql 
-  --packages "org.apache.iceberg:iceberg-spark3-runtime:{{% icebergVersion %}},org.projectnessie:nessie-spark-extensions:{{% nessieVersion %}}"
+  --packages "org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:{{% icebergVersion %}},org.projectnessie:nessie-spark-extensions:{{% nessieVersion %}}"
   --conf spark.sql.extensions="org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions,org.projectnessie.spark.extensions.NessieSparkSessionExtensions"
   --conf <other settings>
 ```
diff --git a/docs/spark-configuration.md b/docs/spark-configuration.md
index 052aa8da47..d2267d8218 100644
--- a/docs/spark-configuration.md
+++ b/docs/spark-configuration.md
@@ -29,7 +29,7 @@ menu:
 
 ## Catalogs
 
-Spark 3.0 adds an API to plug in table catalogs that are used to load, create, and manage Iceberg tables. Spark catalogs are configured by setting Spark properties under `spark.sql.catalog`.
+Spark adds an API to plug in table catalogs that are used to load, create, and manage Iceberg tables. Spark catalogs are configured by setting Spark properties under `spark.sql.catalog`.
 
 This creates an Iceberg catalog named `hive_prod` that loads tables from a Hive metastore:
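
(The hunk cuts off before the configuration snippet this sentence introduces; a minimal sketch of such a `hive_prod` catalog, expressed as spark-sql flags with a placeholder metastore URI:)

    spark-sql \
        --conf spark.sql.catalog.hive_prod=org.apache.iceberg.spark.SparkCatalog \
        --conf spark.sql.catalog.hive_prod.type=hive \
        --conf spark.sql.catalog.hive_prod.uri=thrift://<metastore-host>:<port>
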
 
@@ -128,7 +128,7 @@ spark.sql.catalog.custom_prod.my-additional-catalog-config = my-value
 
 When using Iceberg 0.11.0 and later, Spark 2.4 can load tables from multiple Iceberg catalogs or from table locations.
 
-Catalogs in 2.4 are configured just like catalogs in 3.0, but only Iceberg catalogs are supported.
+Catalogs in 2.4 are configured just like catalogs in 3.x, but only Iceberg catalogs are supported.
 
 
 ## SQL Extensions
diff --git a/docs/spark-ddl.md b/docs/spark-ddl.md
index e520c94656..d5e89b332b 100644
--- a/docs/spark-ddl.md
+++ b/docs/spark-ddl.md
@@ -32,12 +32,12 @@ To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration
 Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions. Spark 2.4 does not support SQL DDL.
 
 {{< hint info >}}
-Spark 2.4 can't create Iceberg tables with DDL, instead use Spark 3.x or the [Iceberg API](..//java-api-quickstart).
+Spark 2.4 can't create Iceberg tables with DDL, instead use Spark 3 or the [Iceberg API](..//java-api-quickstart).
 {{< /hint >}}
 
 ## `CREATE TABLE`
 
-Spark 3.0 can create tables in any Iceberg catalog with the clause `USING iceberg`:
+Spark 3 can create tables in any Iceberg catalog with the clause `USING iceberg`:
 
 ```sql
 CREATE TABLE prod.db.sample (
@@ -333,7 +333,7 @@ ALTER TABLE prod.db.sample DROP COLUMN point.z
 
 ## `ALTER TABLE` SQL extensions
 
-These commands are available in Spark 3.x when using Iceberg [SQL extensions](../spark-configuration#sql-extensions).
+These commands are available in Spark 3 when using Iceberg [SQL extensions](../spark-configuration#sql-extensions).
 
 ### `ALTER TABLE ... ADD PARTITION FIELD`
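
As a worked example for the extensions note above, `ADD PARTITION FIELD` only parses once the Iceberg session extensions are enabled. A sketch, assuming a configured `prod` catalog and a timestamp column `ts` (both illustrative):

    spark-sql \
        --conf spark.sql.extensions=org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions \
        -e "ALTER TABLE prod.db.sample ADD PARTITION FIELD days(ts)"
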
 
diff --git a/docs/spark-procedures.md b/docs/spark-procedures.md
index 8b4c539d76..7631f271b6 100644
--- a/docs/spark-procedures.md
+++ b/docs/spark-procedures.md
@@ -27,7 +27,7 @@ menu:
 
 # Spark Procedures
 
-To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration). Stored procedures are only available when using [Iceberg SQL extensions](../spark-configuration#sql-extensions) in Spark 3.x.
+To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration). Stored procedures are only available when using [Iceberg SQL extensions](../spark-configuration#sql-extensions) in Spark 3.
 
 ## Usage
 
diff --git a/docs/spark-queries.md b/docs/spark-queries.md
index 126600a131..08ca4c2e19 100644
--- a/docs/spark-queries.md
+++ b/docs/spark-queries.md
@@ -31,8 +31,8 @@ To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration
 
 Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions:
 
-| Feature support                                  | Spark 3.0| Spark 2.4  | Notes                                          |
-|--------------------------------------------------|----------|------------|------------------------------------------------|
+| Feature support                                  | Spark 3 | Spark 2.4  | Notes                                          |
+|--------------------------------------------------|-----------|------------|------------------------------------------------|
 | [`SELECT`](#querying-with-sql)                   | ✔️        |            |                                                |
 | [DataFrame reads](#querying-with-dataframes)     | ✔️        | ✔️          |                                                |
 | [Metadata table `SELECT`](#inspecting-tables)    | ✔️        |            |                                                |
@@ -75,7 +75,7 @@ val df = spark.table("prod.db.table")
 
 ### Catalogs with DataFrameReader
 
-Iceberg 0.11.0 adds multi-catalog support to `DataFrameReader` in both Spark 3.x and 2.4.
+Iceberg 0.11.0 adds multi-catalog support to `DataFrameReader` in both Spark 3 and 2.4.
 
 Paths and table names can be loaded with Spark's `DataFrameReader` interface. How tables are loaded depends on how
 the identifier is specified. When using `spark.read.format("iceberg").load(table)` or `spark.table(table)` the `table`
diff --git a/docs/spark-structured-streaming.md b/docs/spark-structured-streaming.md
index 587a3c1c3a..bdb4b34057 100644
--- a/docs/spark-structured-streaming.md
+++ b/docs/spark-structured-streaming.md
@@ -30,11 +30,11 @@ menu:
 Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API
 with different levels of support in Spark versions.
 
-As of Spark 3.0, DataFrame reads and writes are supported.
+As of Spark 3, DataFrame reads and writes are supported.
 
-| Feature support                                  | Spark 3.0| Spark 2.4  | Notes                                          |
-|--------------------------------------------------|----------|------------|------------------------------------------------|
-| [DataFrame write](#streaming-writes)             | ✔        | ✔          |                                                |
+| Feature support                                  | Spark 3 | Spark 2.4  | Notes                                          |
+|--------------------------------------------------|-----------|------------|------------------------------------------------|
+| [DataFrame write](#streaming-writes)             | ✔         | ✔          |                                                |
 
 ## Streaming Reads
 
diff --git a/docs/spark-writes.md b/docs/spark-writes.md
index 0f7d6de47f..06025d71da 100644
--- a/docs/spark-writes.md
+++ b/docs/spark-writes.md
@@ -29,20 +29,20 @@ menu:
 
 To use Iceberg in Spark, first configure [Spark catalogs](../spark-configuration).
 
-Some plans are only available when using [Iceberg SQL extensions](../spark-configuration#sql-extensions) in Spark 3.x.
+Some plans are only available when using [Iceberg SQL extensions](../spark-configuration#sql-extensions) in Spark 3.
 
 Iceberg uses Apache Spark's DataSourceV2 API for data source and catalog implementations. Spark DSv2 is an evolving API with different levels of support in Spark versions:
 
-| Feature support                                  | Spark 3.0| Spark 2.4  | Notes                                          |
-|--------------------------------------------------|----------|------------|------------------------------------------------|
-| [SQL insert into](#insert-into)                  | ✔️        |            |                                                |
-| [SQL merge into](#merge-into)                    | ✔️        |            | ⚠ Requires Iceberg Spark extensions            |
-| [SQL insert overwrite](#insert-overwrite)        | ✔️        |            |                                                |
-| [SQL delete from](#delete-from)                  | ✔️        |            | ⚠ Row-level delete requires Spark extensions   |
-| [SQL update](#update)                            | ✔️        |            | ⚠ Requires Iceberg Spark extensions            |
-| [DataFrame append](#appending-data)              | ✔️        | ✔️          |                                                |
-| [DataFrame overwrite](#overwriting-data)         | ✔️        | ✔️          | ⚠ Behavior changed in Spark 3.0                |
-| [DataFrame CTAS and RTAS](#creating-tables)      | ✔️        |            |                                                |
+| Feature support                                  | Spark 3 | Spark 2.4  | Notes                                        |
+|--------------------------------------------------|-----------|------------|----------------------------------------------|
+| [SQL insert into](#insert-into)                  | ✔️        |            |                                              |
+| [SQL merge into](#merge-into)                    | ✔️        |            | ⚠ Requires Iceberg Spark extensions          |
+| [SQL insert overwrite](#insert-overwrite)        | ✔️        |            |                                              |
+| [SQL delete from](#delete-from)                  | ✔️        |            | ⚠ Row-level delete requires Spark extensions |
+| [SQL update](#update)                            | ✔️        |            | ⚠ Requires Iceberg Spark extensions          |
+| [DataFrame append](#appending-data)              | ✔️        | ✔️          |                                              |
+| [DataFrame overwrite](#overwriting-data)         | ✔️        | ✔️          | ⚠ Behavior changed in Spark 3              |
+| [DataFrame CTAS and RTAS](#creating-tables)      | ✔️        |            |                                              |
 
 
 ## Writing with SQL
@@ -109,14 +109,6 @@ Only one record in the source data can update any given row of the target table,
 
 The partitions that will be replaced by `INSERT OVERWRITE` depend on Spark's partition overwrite mode and the partitioning of a table. `MERGE INTO` can rewrite only affected data files and has more easily understood behavior, so it is recommended instead of `INSERT OVERWRITE`.
 
-{{< hint danger >}}
-Spark 3.0.0 has a correctness bug that affects dynamic `INSERT OVERWRITE` with hidden partitioning, [SPARK-32168][spark-32168].
-For tables with [hidden partitions](../partitioning/#icebergs-hidden-partitioning), make sure you use Spark 3.0.1.
-{{< /hint >}}
-
-[spark-32168]: https://issues.apache.org/jira/browse/SPARK-32168
-
-
 #### Overwrite behavior
 
 Spark's default overwrite mode is **static**, but **dynamic overwrite mode is recommended when writing to Iceberg tables.** Static overwrite mode determines which partitions to overwrite in a table by converting the `PARTITION` clause to a filter, but the `PARTITION` clause can only reference table columns.
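
The paragraph above recommends dynamic overwrite mode; enabling it is a single Spark conf, sketched here (illustrative, not part of this patch):

    spark-sql --conf spark.sql.sources.partitionOverwriteMode=dynamic

The same setting can also be switched at runtime with `SET spark.sql.sources.partitionOverwriteMode=dynamic` before running `INSERT OVERWRITE`.
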
diff --git a/gradle.properties b/gradle.properties
index f7c27c3537..ad3884e238 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -20,7 +20,7 @@ systemProp.knownFlinkVersions=1.14,1.15,1.16
 systemProp.defaultHiveVersions=2
 systemProp.knownHiveVersions=2,3
 systemProp.defaultSparkVersions=3.3
-systemProp.knownSparkVersions=2.4,3.0,3.1,3.2,3.3
+systemProp.knownSparkVersions=2.4,3.1,3.2,3.3
 systemProp.defaultScalaVersion=2.12
 systemProp.knownScalaVersions=2.12,2.13
 org.gradle.parallel=true
diff --git a/jmh.gradle b/jmh.gradle
index cbbd58d0fc..44371344ed 100644
--- a/jmh.gradle
+++ b/jmh.gradle
@@ -29,10 +29,6 @@ if (jdkVersion == '8' && sparkVersions.contains("2.4")) {
   jmhProjects.add(project(":iceberg-spark:iceberg-spark-2.4"))
 }
 
-if (sparkVersions.contains("3.0")) {
-  jmhProjects.add(project(":iceberg-spark:iceberg-spark-3.0_2.12"))
-}
-
 if (sparkVersions.contains("3.1")) {
   jmhProjects.add(project(":iceberg-spark:iceberg-spark-3.1_2.12"))
 }
diff --git a/settings.gradle b/settings.gradle
index abc8f4c001..7a9e4c4963 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -115,18 +115,6 @@ if (flinkVersions.contains("1.16")) {
   project(":iceberg-flink:flink-runtime-1.16").name = "iceberg-flink-runtime-1.16"
 }
 
-if (sparkVersions.contains("3.0")) {
-  include ':iceberg-spark:spark-3.0_2.12'
-  include ':iceberg-spark:spark-extensions-3.0_2.12'
-  include ':iceberg-spark:spark-runtime-3.0_2.12'
-  project(':iceberg-spark:spark-3.0_2.12').projectDir = file('spark/v3.0/spark')
-  project(':iceberg-spark:spark-3.0_2.12').name = 'iceberg-spark-3.0_2.12'
-  project(':iceberg-spark:spark-extensions-3.0_2.12').projectDir = file('spark/v3.0/spark-extensions')
-  project(':iceberg-spark:spark-extensions-3.0_2.12').name = 'iceberg-spark-extensions-3.0_2.12'
-  project(':iceberg-spark:spark-runtime-3.0_2.12').projectDir = file('spark/v3.0/spark-runtime')
-  project(':iceberg-spark:spark-runtime-3.0_2.12').name = 'iceberg-spark-runtime-3.0_2.12'
-}
-
 if (sparkVersions.contains("3.1")) {
   include ':iceberg-spark:spark-3.1_2.12'
   include ':iceberg-spark:spark-extensions-3.1_2.12'
diff --git a/spark/build.gradle b/spark/build.gradle
index 1f0485889e..d1a8549df0 100644
--- a/spark/build.gradle
+++ b/spark/build.gradle
@@ -24,10 +24,6 @@ if (jdkVersion == '8' && sparkVersions.contains("2.4")) {
   apply from: file("$projectDir/v2.4/build.gradle")
 }
 
-if (sparkVersions.contains("3.0")) {
-  apply from: file("$projectDir/v3.0/build.gradle")
-}
-
 if (sparkVersions.contains("3.1")) {
   apply from: file("$projectDir/v3.1/build.gradle")
 }
diff --git a/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/Actions.java b/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/Actions.java
index ee55bbf766..8829e8132c 100644
--- a/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/Actions.java
+++ b/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/Actions.java
@@ -27,7 +27,7 @@ import org.apache.spark.sql.SparkSession;
  *
  * @deprecated since 0.12.0, used for supporting {@link RewriteDataFilesAction} in Spark 2.4 for
  *     backward compatibility. This implementation is no longer maintained, the new implementation
- *     is available with Spark 3.x
+ *     is available with Spark 3
  */
 @Deprecated
 public class Actions {
@@ -68,7 +68,7 @@ public class Actions {
   /**
    * @deprecated since 0.12.0, used for supporting {@link RewriteDataFilesAction} in Spark 2.4 for
    *     backward compatibility. This implementation is no longer maintained, the new implementation
-   *     is available with Spark 3.x
+   *     is available with Spark 3
    */
   @Deprecated
   public static Actions forTable(SparkSession spark, Table table) {
@@ -78,7 +78,7 @@ public class Actions {
   /**
    * @deprecated since 0.12.0, used for supporting {@link RewriteDataFilesAction} in Spark 2.4 for
    *     backward compatibility. This implementation is no longer maintained, the new implementation
-   *     is available with Spark 3.x
+   *     is available with Spark 3
    */
   @Deprecated
   public static Actions forTable(Table table) {
@@ -88,7 +88,7 @@ public class Actions {
   /**
    * @deprecated since 0.12.0, used for supporting {@link RewriteDataFilesAction} in Spark 2.4 for
    *     backward compatibility. This implementation is no longer maintained, the new implementation
-   *     is available with Spark 3.x
+   *     is available with Spark 3
    */
   @Deprecated
   public RewriteDataFilesAction rewriteDataFiles() {
diff --git a/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/RewriteDataFilesAction.java b/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/RewriteDataFilesAction.java
index bbbb57adf5..9e89786fc1 100644
--- a/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/RewriteDataFilesAction.java
+++ b/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/RewriteDataFilesAction.java
@@ -33,7 +33,7 @@ import org.apache.spark.sql.SparkSession;
 
 /**
  * @deprecated since 0.12.0, keeping this in Spark 2.4 for backward compatibility. This
- *     implementation is no longer maintained, the new implementation is available with Spark 3.x
+ *     implementation is no longer maintained, the new implementation is available with Spark 3
  */
 @Deprecated
 public class RewriteDataFilesAction extends BaseRewriteDataFilesAction<RewriteDataFilesAction> {
diff --git a/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/SparkActions.java b/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/SparkActions.java
index 9985c26214..a399c6e483 100644
--- a/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/SparkActions.java
+++ b/spark/v2.4/spark/src/main/java/org/apache/iceberg/actions/SparkActions.java
@@ -24,7 +24,7 @@ import org.apache.spark.sql.SparkSession;
 /**
  * @deprecated since 0.12.0, used for supporting {@link RewriteDataFilesAction} in Spark 2.4 for
  *     backward compatibility. This implementation is no longer maintained, the new implementation
- *     is available with Spark 3.x
+ *     is available with Spark 3
  */
 @Deprecated
 class SparkActions extends Actions {
diff --git a/spark/v2.4/spark/src/test/java/org/apache/iceberg/examples/README.md b/spark/v2.4/spark/src/test/java/org/apache/iceberg/examples/README.md
index 5f02061b62..eca410dfea 100644
--- a/spark/v2.4/spark/src/test/java/org/apache/iceberg/examples/README.md
+++ b/spark/v2.4/spark/src/test/java/org/apache/iceberg/examples/README.md
@@ -164,7 +164,7 @@ Code examples can be found [here](SnapshotFunctionalityTest.java).
 Iceberg provides support to handle schema evolution of your tables over time:
 
 1. Add a new column
-    1. The new column is always added at the end of the table (**NOTE**: This will be fixed with Spark 3.0 which has implemented AFTER and FIRST operations).
+    1. The new column is always added at the end of the table (**NOTE**: This is fixed in Spark 3 which has implemented AFTER and FIRST operations).
     1. You are only able to add a column at the end of the schema, not somewhere in the middle. 
     1. Any rows using the earlier schema return a `null` value for this new column. You cannot use an alternative default value.
     1. This column automatically becomes an `optional` column, meaning adding data to this column isn't enforced for each future write. 
diff --git a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
index 3de2bf5caa..b3ceb0e2cb 100644
--- a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
+++ b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
@@ -283,7 +283,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithWriteOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
@@ -336,7 +336,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithSparkSqlOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
diff --git a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
index 2629e42553..a2fb66cf4b 100644
--- a/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
+++ b/spark/v2.4/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
@@ -395,7 +395,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjection() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());
@@ -431,7 +431,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjectionWithMiddle() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());
diff --git a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
index 3de2bf5caa..b3ceb0e2cb 100644
--- a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
+++ b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
@@ -283,7 +283,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithWriteOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
@@ -336,7 +336,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithSparkSqlOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
diff --git a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
index 514e04e3eb..66247c24e2 100644
--- a/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
+++ b/spark/v3.1/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
@@ -395,7 +395,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjection() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());
@@ -431,7 +431,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjectionWithMiddle() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());
diff --git a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
index 3de2bf5caa..b3ceb0e2cb 100644
--- a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
+++ b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
@@ -283,7 +283,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithWriteOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
@@ -336,7 +336,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithSparkSqlOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
diff --git a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
index 514e04e3eb..66247c24e2 100644
--- a/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
+++ b/spark/v3.2/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
@@ -395,7 +395,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjection() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());
@@ -431,7 +431,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjectionWithMiddle() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
index 517ae200ca..310e69b827 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java
@@ -283,7 +283,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithWriteOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
@@ -336,7 +336,7 @@ public class TestDataFrameWrites extends AvroDataTest {
   @Test
   public void testNullableWithSparkSqlOption() throws IOException {
     Assume.assumeTrue(
-        "Spark 3.0 rejects writing nulls to a required column", spark.version().startsWith("2"));
+        "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2"));
 
     File location = new File(temp.newFolder("parquet"), "test");
     String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString());
diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
index 514e04e3eb..66247c24e2 100644
--- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
+++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestSparkDataWrite.java
@@ -395,7 +395,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjection() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());
@@ -431,7 +431,7 @@ public class TestSparkDataWrite {
   @Test
   public void testWriteProjectionWithMiddle() throws IOException {
     Assume.assumeTrue(
-        "Not supported in Spark 3.0; analysis requires all columns are present",
+        "Not supported in Spark 3; analysis requires all columns are present",
         spark.version().startsWith("2"));
 
     File parent = temp.newFolder(format.toString());