Posted to commits@sedona.apache.org by ji...@apache.org on 2023/05/01 07:49:51 UTC

[sedona] branch master updated: [SEDONA-276][SEDONA-277] Support Spark 3.4 (#825)

This is an automated email from the ASF dual-hosted git repository.

jiayu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/sedona.git


The following commit(s) were added to refs/heads/master by this push:
     new 56239fdf [SEDONA-276][SEDONA-277] Support Spark 3.4 (#825)
56239fdf is described below

commit 56239fdf0ff38666ff686edd0fba8e07fd042080
Author: Kristin Cowalcijk <bo...@wherobots.com>
AuthorDate: Mon May 1 15:49:44 2023 +0800

    [SEDONA-276][SEDONA-277] Support Spark 3.4 (#825)
---
 .github/workflows/java.yml                         |  21 ++-
 .github/workflows/python.yml                       |  19 +-
 .github/workflows/r.yml                            |  22 ++-
 R/tests/testthat/helper-initialize.R               |   8 +-
 common/pom.xml                                     |  19 +-
 core/pom.xml                                       |  21 +++
 docs/setup/compile.md                              |  25 ++-
 docs/setup/install-python.md                       |   7 +-
 docs/setup/install-scala.md                        |  38 ++--
 docs/setup/maven-coordinates.md                    | 166 +++++++++++++++---
 docs/setup/platform.md                             |  30 ++--
 docs/tutorial/sql-pure-sql.md                      |  28 ++-
 docs/tutorial/sql.md                               |   3 +-
 flink-shaded/pom.xml                               |  13 +-
 flink/pom.xml                                      |   5 +-
 pom.xml                                            |  98 +++++------
 python-adapter/pom.xml                             |  22 +++
 spark-shaded/pom.xml                               |  11 --
 sql/common/.gitignore                              |  12 ++
 sql/{ => common}/pom.xml                           |  17 +-
 sql/{ => common}/src/.gitignore                    |   0
 sql/{ => common}/src/main/.gitignore               |   0
 .../apache/sedona/sql/SedonaSqlExtensions.scala    |   0
 .../scala/org/apache/sedona/sql/UDF/Catalog.scala  |   0
 .../org/apache/sedona/sql/UDF/UdfRegistrator.scala |   0
 .../org/apache/sedona/sql/UDT/UdtRegistrator.scala |   0
 .../org/apache/sedona/sql/utils/Adapter.scala      |   0
 .../sedona/sql/utils/GeometrySerializer.scala      |   0
 .../apache/sedona/sql/utils/IndexSerializer.scala  |   0
 .../sedona/sql/utils/SedonaSQLRegistrator.scala    |   0
 .../parquet/GeoParquetFileFormatBase.scala         |  33 ++++
 .../datasources/parquet/GeoParquetMetaData.scala   |   0
 .../parquet/GeoParquetSpatialFilter.scala          |   0
 .../spark/sql/sedona_sql/UDT/GeometryUDT.scala     |   0
 .../apache/spark/sql/sedona_sql/UDT/IndexUDT.scala |   0
 .../spark/sql/sedona_sql/UDT/RasterUDT.scala       |   0
 .../sql/sedona_sql/UDT/UdtRegistratorWrapper.scala |   0
 .../sql/sedona_sql/execution/SedonaSparkPlan.scala |   0
 .../expressions/AggregateFunctions.scala           |   0
 .../sql/sedona_sql/expressions/Constructors.scala  |   0
 .../sql/sedona_sql/expressions/DataFrameAPI.scala  |   0
 .../sql/sedona_sql/expressions/Functions.scala     |   0
 .../expressions/NullSafeExpressions.scala          |   0
 .../sql/sedona_sql/expressions/Predicates.scala    |   0
 .../sql/sedona_sql/expressions/SerdeAware.scala    |   0
 .../expressions/collect/ST_Collect.scala           |   0
 .../expressions/geohash/GeoHashDecoder.scala       |   0
 .../sql/sedona_sql/expressions/implicits.scala     |   0
 .../expressions/raster/Constructors.scala          |   0
 .../sedona_sql/expressions/raster/Functions.scala  |   0
 .../sql/sedona_sql/expressions/raster/IO.scala     |   0
 .../sedona_sql/expressions/raster/implicits.scala  |   0
 .../sql/sedona_sql/expressions/st_aggregates.scala |   0
 .../sedona_sql/expressions/st_constructors.scala   |   0
 .../sql/sedona_sql/expressions/st_functions.scala  |   0
 .../sql/sedona_sql/expressions/st_predicates.scala |   0
 .../spark/sql/sedona_sql/io/GeotiffSchema.scala    |   0
 .../spark/sql/sedona_sql/io/HadoopUtils.scala      |   0
 .../spark/sql/sedona_sql/io/ImageReadOptions.scala |   0
 .../sql/sedona_sql/io/ImageWriteOptions.scala      |   0
 .../sedona_sql/optimization/ExpressionUtils.scala  |   0
 .../SpatialFilterPushDownForGeoParquet.scala       |   6 +-
 .../strategy/join/BroadcastIndexJoinExec.scala     |   0
 .../strategy/join/DistanceJoinExec.scala           |   0
 .../strategy/join/JoinQueryDetector.scala          |   0
 .../sedona_sql/strategy/join/RangeJoinExec.scala   |   0
 .../strategy/join/SpatialIndexExec.scala           |   0
 .../strategy/join/TraitJoinQueryBase.scala         |   0
 .../strategy/join/TraitJoinQueryExec.scala         |   0
 .../spark/sql/sedona_sql/strategy/join/enums.scala |   0
 sql/{ => common}/src/test/.gitignore               |   0
 .../org/apache/sedona/sql/adapterTestJava.java     |   0
 .../src/test/resources/log4j2.properties           |   0
 .../sedona/sql/BroadcastIndexJoinSuite.scala       |   0
 .../org/apache/sedona/sql/DeduplicationSuite.scala |   0
 .../org/apache/sedona/sql/GeometrySample.scala     |   0
 .../apache/sedona/sql/GeometryUdtTestScala.scala   |   0
 .../org/apache/sedona/sql/SpatialJoinSuite.scala   |   0
 .../org/apache/sedona/sql/TestBaseScala.scala      |   2 +-
 .../org/apache/sedona/sql/adapterTestScala.scala   |   0
 .../sedona/sql/aggregateFunctionTestScala.scala    |   0
 .../apache/sedona/sql/constructorTestScala.scala   |   0
 .../apache/sedona/sql/dataFrameAPITestScala.scala  |   0
 .../org/apache/sedona/sql/functionTestScala.scala  |   0
 .../sedona/sql/functions/FunctionsHelper.scala     |   0
 .../apache/sedona/sql/functions/STS2CellIDs.scala  |   0
 .../sedona/sql/functions/StMakePolygonSpec.scala   |   0
 .../sql/functions/TestGeometrySimplify.scala       |   0
 .../sedona/sql/functions/TestStSubDivide.scala     |   2 +-
 .../sql/functions/collect/TestStCollect.scala      |   0
 .../sedona/sql/functions/geohash/Fixtures.scala    |   0
 .../functions/geohash/TestCalculatingGeoHash.scala |   0
 .../sql/functions/geohash/TestGeoHashDecoder.scala |   0
 .../sql/functions/geohash/TestStGeoHash.scala      |   0
 .../geohash/TestStGeometryFromGeoHash.scala        |   0
 .../scala/org/apache/sedona/sql/implicits.scala    |   0
 .../ogc/GeometryTypesAndFunctionsTestScala.scala   |   0
 .../org/apache/sedona/sql/ogc/package-info.java    |   0
 .../apache/sedona/sql/predicateJoinTestScala.scala |   0
 .../org/apache/sedona/sql/predicateTestScala.scala |   0
 .../org/apache/sedona/sql/rasteralgebraTest.scala  |  25 +--
 .../org/apache/sedona/sql/serdeAwareTest.scala     |   0
 sql/pom.xml                                        | 188 ++++++++------------
 sql/spark-3.0/.gitignore                           |  12 ++
 sql/{ => spark-3.0}/pom.xml                        |  20 +--
 ...org.apache.spark.sql.sources.DataSourceRegister |   0
 .../datasources/parquet/GeoDataSourceUtils.scala   |   0
 .../datasources/parquet/GeoDateTimeUtils.scala     |   0
 .../datasources/parquet/GeoParquetFileFormat.scala |   4 +-
 .../datasources/parquet/GeoParquetFilters.scala    |   0
 .../parquet/GeoParquetReadSupport.scala            |   0
 .../parquet/GeoParquetRecordMaterializer.scala     |   0
 .../parquet/GeoParquetRowConverter.scala           |   0
 .../parquet/GeoParquetSchemaConverter.scala        |   0
 .../datasources/parquet/GeoParquetUtils.scala      |   0
 .../parquet/GeoParquetWriteSupport.scala           |   0
 .../datasources/parquet/GeoSchemaMergeUtils.scala  |   0
 .../sql/sedona_sql/io/GeotiffFileFormat.scala      |   0
 .../src/test/resources/log4j2.properties           |   0
 .../sql/GeoParquetSpatialFilterPushDownSuite.scala |   0
 .../org/apache/sedona/sql/TestBaseScala.scala      |  58 +++++++
 .../org/apache/sedona/sql/geoparquetIOTests.scala  |   0
 .../scala/org/apache/sedona/sql/rasterIOTest.scala |   0
 sql/spark-3.4/.gitignore                           |  12 ++
 sql/{ => spark-3.4}/pom.xml                        |  20 +--
 ...org.apache.spark.sql.sources.DataSourceRegister |   0
 .../datasources/parquet/GeoDataSourceUtils.scala   |   0
 .../datasources/parquet/GeoParquetFileFormat.scala |  21 ++-
 .../parquet/GeoParquetReadSupport.scala            | 192 +++++++++++++++++++++
 .../parquet/GeoParquetRecordMaterializer.scala     |   0
 .../parquet/GeoParquetRowConverter.scala           |   2 +-
 .../parquet/GeoParquetSchemaConverter.scala        |   0
 .../datasources/parquet/GeoParquetUtils.scala      |   0
 .../parquet/GeoParquetWriteSupport.scala           |   2 +-
 .../sql/sedona_sql/io/GeotiffFileFormat.scala      |   7 +-
 .../src/test/resources/log4j2.properties           |   0
 .../sql/GeoParquetSpatialFilterPushDownSuite.scala |   0
 .../org/apache/sedona/sql/TestBaseScala.scala      |  58 +++++++
 .../org/apache/sedona/sql/geoparquetIOTests.scala  |   0
 .../scala/org/apache/sedona/sql/rasterIOTest.scala |   0
 viz/pom.xml                                        |  22 +++
 141 files changed, 898 insertions(+), 373 deletions(-)

diff --git a/.github/workflows/java.yml b/.github/workflows/java.yml
index 4b5daf0b..157b183e 100644
--- a/.github/workflows/java.yml
+++ b/.github/workflows/java.yml
@@ -16,18 +16,22 @@ jobs:
       fail-fast: true
       matrix:
         include:
-          - spark: 3.3.0
+          - spark: 3.4.0
             scala: 2.13.8
             jdk: '8'
             skipTests: ''
-          - spark: 3.3.0
+          - spark: 3.4.0
             scala: 2.12.15
             jdk: '8'
-            skipTests: ''            
-          - spark: 3.3.0
+            skipTests: ''
+          - spark: 3.4.0
             scala: 2.12.15
             jdk: '11'
             skipTests: ''
+          - spark: 3.3.0
+            scala: 2.12.15
+            jdk: '8'
+            skipTests: ''            
           - spark: 3.2.3
             scala: 2.12.15
             jdk: '8'
@@ -59,7 +63,12 @@ jobs:
         SPARK_VERSION: ${{ matrix.spark }}
         SCALA_VERSION: ${{ matrix.scala }}
         SKIP_TESTS: ${{ matrix.skipTests }}
-      run: mvn -q clean install -Dscala=${SCALA_VERSION:0:4} -Dspark.version=${SPARK_VERSION} ${SKIP_TESTS}
+      run: |
+        SPARK_COMPAT_VERSION="3.0"
+        if [ ${SPARK_VERSION:2:1} -gt "3" ]; then
+          SPARK_COMPAT_VERSION=${SPARK_VERSION:0:3}
+        fi
+        mvn -q clean install -Dspark=${SPARK_COMPAT_VERSION} -Dscala=${SCALA_VERSION:0:4} -Dspark.version=${SPARK_VERSION} ${SKIP_TESTS}
     - run: mkdir staging
     - run: cp viz/target/sedona-*.jar staging
     - run: cp spark-shaded/target/sedona-*.jar staging
@@ -68,4 +77,4 @@ jobs:
     - uses: actions/upload-artifact@v2
       with:
         name: generated-jars
-        path: staging
\ No newline at end of file
+        path: staging
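
The compat-version logic introduced above keys on the minor-version digit of `SPARK_VERSION`. A minimal standalone sketch of the same derivation, assuming single-digit minor versions (which is all `${SPARK_VERSION:2:1}` can handle):

```bash
#!/usr/bin/env bash
# Map a full Spark version to the compat version passed as -Dspark.
# ${SPARK_VERSION:2:1} is the minor-version digit, so this assumes
# single-digit minors: 3.4.x maps to "3.4", anything below stays "3.0".
for SPARK_VERSION in 3.2.3 3.3.0 3.4.0; do
  SPARK_COMPAT_VERSION="3.0"
  if [ "${SPARK_VERSION:2:1}" -gt 3 ]; then
    SPARK_COMPAT_VERSION=${SPARK_VERSION:0:3}
  fi
  echo "${SPARK_VERSION} -> -Dspark=${SPARK_COMPAT_VERSION}"
done
# Prints:
#   3.2.3 -> -Dspark=3.0
#   3.3.0 -> -Dspark=3.0
#   3.4.0 -> -Dspark=3.4
```
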
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index 4412d942..cbca8873 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -15,22 +15,26 @@ jobs:
     strategy:
       matrix:
         include:
-          - spark: '3.3.0'
+          - spark: '3.4.0'
             scala: '2.12.8'
             python: '3.10'
             hadoop: '3'
-          - spark: '3.3.0'
+          - spark: '3.4.0'
             scala: '2.12.8'
             python: '3.9'
             hadoop: '3'
-          - spark: '3.3.0'
+          - spark: '3.4.0'
             scala: '2.12.8'
             python: '3.8'
             hadoop: '3'
-          - spark: '3.3.0'
+          - spark: '3.4.0'
             scala: '2.12.8'
             python: '3.7'
             hadoop: '3'
+          - spark: '3.3.0'
+            scala: '2.12.8'
+            python: '3.8'
+            hadoop: '3'
           - spark: '3.2.0'
             scala: '2.12.8'
             python: '3.7'
@@ -61,7 +65,12 @@ jobs:
     - env:
         SPARK_VERSION: ${{ matrix.spark }}
         SCALA_VERSION: ${{ matrix.scala }}
-      run: if [ ${SPARK_VERSION:0:1} == "3" ]; then mvn -q clean install -DskipTests -Dscala=${SCALA_VERSION:0:4} -Dspark=3.0 -Dgeotools ; else mvn -q clean install -DskipTests -Dscala=${SCALA_VERSION:0:4} -Dspark=2.4 -Dgeotools ; fi
+      run: |
+          SPARK_COMPAT_VERSION="3.0"
+          if [ ${SPARK_VERSION:2:1} -gt "3" ]; then
+            SPARK_COMPAT_VERSION=${SPARK_VERSION:0:3}
+          fi
+          mvn -q clean install -DskipTests -Dspark=${SPARK_COMPAT_VERSION} -Dscala=${SCALA_VERSION:0:4} -Dgeotools
     - env:
         SPARK_VERSION: ${{ matrix.spark }}
         HADOOP_VERSION: ${{ matrix.hadoop }}
diff --git a/.github/workflows/r.yml b/.github/workflows/r.yml
index e5d46b58..d1f7e685 100644
--- a/.github/workflows/r.yml
+++ b/.github/workflows/r.yml
@@ -15,12 +15,14 @@ jobs:
     strategy:
       fail-fast: true
       matrix:
-        spark: [3.0.3, 3.1.2, 3.2.1, 3.3.0]
+        spark: [3.0.3, 3.1.2, 3.2.1, 3.3.0, 3.4.0]
+        hadoop: [3]
         scala: [2.12.15]
         r: [oldrel, release]
 
     env:
       SPARK_VERSION: ${{ matrix.spark }}
+      HADOOP_VERSION: ${{ matrix.hadoop }}
       SCALA_VERSION: ${{ matrix.scala }}
       # Ensure the temporary auth token for this workflow, instead of the
       # bundled GitHub PAT from the `remotes` package is used for
@@ -84,13 +86,23 @@ jobs:
           key: apache.sedona-apache-spark-${{ steps.os-name.outputs.os-name }}-${{ env.SPARK_VERSION }}
       - name: Build Sedona libraries
         run: |
-          if [ ${SPARK_VERSION:0:1} == "3" ]; then
-            mvn -q clean install -DskipTests -Dscala=${SCALA_VERSION:0:4} -Dspark=3.0 -Dgeotools
-          else
-            mvn -q clean install -DskipTests -Dscala=${SCALA_VERSION:0:4} -Dspark=2.4 -Dgeotools
+          SPARK_COMPAT_VERSION="3.0"
+          if [ ${SPARK_VERSION:2:1} -gt "3" ]; then
+            SPARK_COMPAT_VERSION=${SPARK_VERSION:0:3}
           fi
+          mvn -q clean install -DskipTests -Dspark=${SPARK_COMPAT_VERSION} -Dscala=${SCALA_VERSION:0:4}
       - name: Run tests
         run: |
+          if [[ "${SPARK_VERSION:0:3}" < "3.3" ]]; then
+            case "$HADOOP_VERSION" in
+              3)
+                export HADOOP_VERSION=3.2
+                ;;
+              2)
+                export HADOOP_VERSION=2.7
+                ;;
+            esac
+          fi
           export SPARKLYR_LOG_FILE='/tmp/sparklyr.log'
           source ./.github/workflows/scripts/prepare_sparklyr_sedona_test_env.sh
           echo "Apache Sedona jar files: ${SEDONA_JAR_FILES}"
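
The guard `[[ "${SPARK_VERSION:0:3}" < "3.3" ]]` above compares strings lexicographically, which is safe only while minor versions stay single-digit. A standalone sketch of the resulting Hadoop-label mapping, assuming (as the workflow does) that Spark distributions before 3.3 are published with `hadoop2.7`/`hadoop3.2` labels and later ones with plain `2`/`3`:

```bash
#!/usr/bin/env bash
# Pre-3.3 Spark tarballs are labelled hadoop2.7/hadoop3.2; from 3.3 on
# the labels are plain 2/3. Note "<" inside [[ ]] is a string compare,
# fine for 3.0..3.4 but it would misorder a hypothetical "3.10".
for SPARK_VERSION in 3.0.3 3.1.2 3.2.1 3.3.0 3.4.0; do
  HADOOP_VERSION=3
  if [[ "${SPARK_VERSION:0:3}" < "3.3" ]]; then
    HADOOP_VERSION=3.2
  fi
  echo "spark=${SPARK_VERSION} hadoop=${HADOOP_VERSION}"
done
```
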
diff --git a/R/tests/testthat/helper-initialize.R b/R/tests/testthat/helper-initialize.R
index 439a20d6..b399345a 100644
--- a/R/tests/testthat/helper-initialize.R
+++ b/R/tests/testthat/helper-initialize.R
@@ -19,9 +19,10 @@ testthat_spark_connection <- function(conn_retry_interval_s = 2) {
   conn_key <- ".testthat_spark_connection"
   if (!exists(conn_key, envir = .GlobalEnv)) {
     version <- Sys.getenv("SPARK_VERSION")
+    hadoop_version <- Sys.getenv("HADOOP_VERSION")
     spark_installed <- spark_installed_versions()
-    if (nrow(spark_installed[spark_installed$spark == version, ]) == 0) {
-      spark_install(version)
+    if (nrow(spark_installed[spark_installed$spark == version & spark_installed$hadoop_version == hadoop_version, ]) == 0) {
+      spark_install(version, hadoop_version)
     }
 
     conn_attempts <- 3
@@ -37,7 +38,8 @@ testthat_spark_connection <- function(conn_retry_interval_s = 2) {
             method = "shell",
             config = config,
             app_name = paste0("testthat-", uuid::UUIDgenerate()),
-            version = version
+            version = version,
+            hadoop_version = hadoop_version
           )
           assign(conn_key, sc, envir = .GlobalEnv)
           TRUE
diff --git a/common/pom.xml b/common/pom.xml
index c64771be..ed685ea7 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -32,6 +32,10 @@
     <url>http://sedona.apache.org/</url>
     <packaging>jar</packaging>
 
+    <properties>
+        <maven.deploy.skip>${skip.deploy.common.modules}</maven.deploy.skip>
+    </properties>
+
     <dependencies>
         <dependency>
             <groupId>org.slf4j</groupId>
@@ -83,19 +87,4 @@
             </plugin>
         </plugins>
     </build>
-    <profiles>
-        <profile>
-            <id>common-scala2.13</id>
-            <activation>
-                <property>
-                    <name>scala</name>
-                    <value>2.13</value>
-                </property>
-                <activeByDefault>false</activeByDefault>
-            </activation>
-            <properties>
-                <maven.deploy.skip>true</maven.deploy.skip>
-            </properties>
-        </profile>
-    </profiles>
 </project>
diff --git a/core/pom.xml b/core/pom.xml
index 45c84043..3e16e362 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -52,6 +52,8 @@
         <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-core_${scala.compat.version}</artifactId>
+            <version>${spark.version}</version>
+            <scope>provided</scope>
             <exclusions>
                 <!-- Make sure Hadoop's guava version is used for older versions of Spark tests -->
                 <exclusion>
@@ -67,11 +69,27 @@
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-client-runtime</artifactId>
                 </exclusion>
+                <!-- Exclude log4j 1 for older versions of Spark-->
+                <exclusion>
+                    <groupId>log4j</groupId>
+                    <artifactId>log4j</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+                <!-- Exclude log4j-slf4j2-impl for Spark 3.4 -->
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-slf4j2-impl</artifactId>
+                </exclusion>
             </exclusions>
         </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-sql_${scala.compat.version}</artifactId>
+            <version>${spark.version}</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.hadoop</groupId>
@@ -122,12 +140,15 @@
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
+            <version>${scala.version}</version>
         </dependency>
 
         <!-- Test -->
         <dependency>
             <groupId>org.scalatest</groupId>
             <artifactId>scalatest_${scala.compat.version}</artifactId>
+            <version>${scalatest.version}</version>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.hadoop</groupId>
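
The log4j exclusions added above exist because older Sparks drag in log4j 1 while Spark 3.4 switched to the SLF4J 2 binding (`log4j-slf4j2-impl`), either of which can clash with Sedona's own logging setup. A sketch for verifying the exclusions took effect, using the stock maven-dependency-plugin (module and flag values are illustrative):

```bash
# Show which logging artifacts sedona-core resolves under each profile;
# log4j:log4j, slf4j-log4j12 and log4j-slf4j2-impl should all be absent.
mvn -pl core dependency:tree -Dspark=3.4 -Dscala=2.12 \
  -Dincludes=org.apache.logging.log4j,org.slf4j,log4j
```
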
diff --git a/docs/setup/compile.md b/docs/setup/compile.md
index 0050bb63..b3ae72b7 100644
--- a/docs/setup/compile.md
+++ b/docs/setup/compile.md
@@ -21,6 +21,7 @@ To compile all modules, please make sure you are in the root folder of all modul
 	mvn clean install
 	```
 	The maven unit tests of all modules may take up to 30 minutes. 	
+
 === "With Geotools jars packaged"
 	```bash
 	mvn clean install -DskipTests -Dgeotools
@@ -33,14 +34,29 @@ To compile all modules, please make sure you are in the root folder of all modul
 
 ### Compile with different targets
 
-=== "Spark 3.0 + Scala 2.12"
+Users can specify the `-Dspark` and `-Dscala` command line options to compile for different targets. Available targets are:
+
+* `-Dspark`: `3.0` for Spark 3.0 to 3.3; `{major}.{minor}` for Spark 3.4 or later. For example, specify `-Dspark=3.4` to build for Spark 3.4.
+* `-Dscala`: `2.12` or `2.13`
+
+=== "Spark 3.0 to 3.3 Scala 2.12"
+	```
+	mvn clean install -DskipTests -Dspark=3.0 -Dscala=2.12
+	```
+=== "Spark 3.4+ Scala 2.12"
+	```
+	mvn clean install -DskipTests -Dspark=3.4 -Dscala=2.12
+	```
+    Please replace `3.4` with the Spark major.minor version when building for higher Spark versions.
+=== "Spark 3.0 to 3.3 Scala 2.13"
 	```
-	mvn clean install -DskipTests -Dscala=2.12
+	mvn clean install -DskipTests -Dspark=3.0 -Dscala=2.13
 	```
-=== "Spark 3.0 + Scala 2.13"
+=== "Spark 3.4+ Scala 2.13"
 	```
-	mvn clean install -DskipTests -Dscala=2.13
+	mvn clean install -DskipTests -Dspark=3.4 -Dscala=2.13
 	```
+    Please replace `3.4` with the Spark major.minor version when building for higher Spark versions.
 
 !!!tip
 	To get the Sedona Spark Shaded jar with all GeoTools jars included, simply append `-Dgeotools` option. The command is like this:`mvn clean install -DskipTests -Dscala=2.12 -Dspark=3.0 -Dgeotools`
@@ -84,6 +100,7 @@ pipenv install --dev
 6. Run the Python tests
 ```
 cd python
+pipenv run python setup.py build_ext --inplace
 pipenv run pytest tests
 ```
 ## Compile the documentation
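
Putting the pieces of this page together: `-Dspark` selects the compat profile, while `-Dspark.version` (used by the CI workflows above) pins the exact Spark dependency. A hedged one-liner for a Spark 3.4.0 / Scala 2.12 build:

```bash
# Build all modules for Spark 3.4.0 with Scala 2.12, skipping tests.
# -Dspark=3.4 activates the sedona-spark-3.4 profile; -Dspark.version
# overrides the exact Spark artifact version resolved by that profile.
mvn clean install -DskipTests -Dspark=3.4 -Dscala=2.12 -Dspark.version=3.4.0
```
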
diff --git a/docs/setup/install-python.md b/docs/setup/install-python.md
index f7a09f17..00505616 100644
--- a/docs/setup/install-python.md
+++ b/docs/setup/install-python.md
@@ -33,7 +33,10 @@ python3 setup.py install
 
 ### Prepare sedona-spark-shaded jar
 
-Sedona Python needs one additional jar file called `sedona-spark-shaded` to work properly. Please make sure you use the correct version for Spark and Scala. For Spark 3.0 + Scala 2.12, it is called `sedona-spark-shaded-3.0_2.12-{{ sedona.current_version }}.jar`
+Sedona Python needs one additional jar file called `sedona-spark-shaded` to work properly. Please make sure you use the correct version for Spark and Scala.
+
+* For Spark 3.0 to 3.3 and Scala 2.12, it is called `sedona-spark-shaded-3.0_2.12-{{ sedona.current_version }}.jar`
+* For Spark 3.4+ and Scala 2.12, it is called `sedona-spark-shaded-3.4_2.12-{{ sedona.current_version }}.jar`. If you are using Spark versions higher than 3.4, please replace the `3.4` in artifact names with the corresponding major.minor version numbers.
 
 You can get it using one of the following methods:
 
@@ -72,4 +75,4 @@ export SPARK_HOME=~/Downloads/spark-3.0.1-bin-hadoop2.7
 export PYTHONPATH=$SPARK_HOME/python
 ```
 
-You can then play with [Sedona Python Jupyter notebook](../../tutorial/jupyter-notebook/).
\ No newline at end of file
+You can then play with [Sedona Python Jupyter notebook](../../tutorial/jupyter-notebook/).
diff --git a/docs/setup/install-scala.md b/docs/setup/install-scala.md
index 6fe6f291..ddb792e5 100644
--- a/docs/setup/install-scala.md
+++ b/docs/setup/install-scala.md
@@ -15,16 +15,17 @@ There are two ways to use a Scala or Java library with Apache Spark. You can use
 ```
 ./bin/spark-shell --packages MavenCoordinates
 ```
+Please refer to [Sedona Maven Central coordinates](maven-coordinates.md) to select the corresponding Sedona packages for your Spark version.
 
-* Local mode: test Sedona without setting up a cluster
-```
-./bin/spark-shell --packages org.apache.sedona:sedona-spark-shaded-3.0_2.12:{{ sedona.current_version }},org.apache.sedona:sedona-viz-3.0_2.12:{{ sedona.current_version }},org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}
-```
+    * Local mode: test Sedona without setting up a cluster
+    ```
+    ./bin/spark-shell --packages org.apache.sedona:sedona-spark-shaded-3.0_2.12:{{ sedona.current_version }},org.apache.sedona:sedona-viz-3.0_2.12:{{ sedona.current_version }},org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}
+    ```
   
-* Cluster mode: you need to specify Spark Master IP
-```
-./bin/spark-shell --master spark://localhost:7077 --packages org.apache.sedona:sedona-spark-shaded-3.0_2.12:{{ sedona.current_version }},org.apache.sedona:sedona-viz-3.0_2.12:{{ sedona.current_version }},org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}
-```
+    * Cluster mode: you need to specify Spark Master IP
+    ```
+    ./bin/spark-shell --master spark://localhost:7077 --packages org.apache.sedona:sedona-spark-shaded-3.0_2.12:{{ sedona.current_version }},org.apache.sedona:sedona-viz-3.0_2.12:{{ sedona.current_version }},org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}
+    ```
   
 ### Download Sedona jar manually
 1. Have your Spark cluster ready.
@@ -36,16 +37,17 @@ There are two ways to use a Scala or Java library with Apache Spark. You can use
 ```
 ./bin/spark-shell --jars /Path/To/SedonaJars.jar
 ```
+If you are using Spark 3.0 to 3.3, please use jars with filenames containing `3.0`, such as `sedona-spark-shaded-3.0_2.12-{{ sedona.current_version }}`; if you are using Spark 3.4 or higher, please use jars with the Spark major.minor version in the filename, such as `sedona-spark-shaded-3.4_2.12-{{ sedona.current_version }}`.
  
-* Local mode: test Sedona without setting up a cluster
-```
-./bin/spark-shell --jars org.apache.sedona:sedona-spark-shaded-3.0_2.12:{{ sedona.current_version }},org.apache.sedona:sedona-viz-3.0_2.12:{{ sedona.current_version }},org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}
-```
-  
-* Cluster mode: you need to specify Spark Master IP  
-```
-./bin/spark-shell --master spark://localhost:7077 --jars org.apache.sedona:sedona-spark-shaded-3.0_2.12:{{ sedona.current_version }},org.apache.sedona:sedona-viz-3.0_2.12:{{ sedona.current_version }},org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}
-```
+    * Local mode: test Sedona without setting up a cluster
+    ```
+    ./bin/spark-shell --jars /path/to/sedona-spark-shaded-3.0_2.12-{{ sedona.current_version }}.jar,/path/to/sedona-viz-3.0_2.12-{{ sedona.current_version }}.jar,/path/to/geotools-wrapper-{{ sedona.current_geotools }}.jar
+    ```
+
+    * Cluster mode: you need to specify Spark Master IP
+    ```
+    ./bin/spark-shell --master spark://localhost:7077 --jars /path/to/sedona-spark-shaded-3.0_2.12-{{ sedona.current_version }}.jar,/path/to/sedona-viz-3.0_2.12-{{ sedona.current_version }}.jar,/path/to/geotools-wrapper-{{ sedona.current_geotools }}.jar
+    ```
 
 ## Spark SQL shell
 
@@ -64,4 +66,4 @@ A self-contained project allows you to create multiple Scala / Java files and wr
 ```
 
 !!!note
-	The detailed explanation of spark-submit is available on [Spark website](https://spark.apache.org/docs/latest/submitting-applications.html).
\ No newline at end of file
+	The detailed explanation of spark-submit is available on [Spark website](https://spark.apache.org/docs/latest/submitting-applications.html).
diff --git a/docs/setup/maven-coordinates.md b/docs/setup/maven-coordinates.md
index 9227975f..40155378 100644
--- a/docs/setup/maven-coordinates.md
+++ b/docs/setup/maven-coordinates.md
@@ -5,16 +5,23 @@
 
 !!!warning
 	For Scala/Java/Python users, this is the most common way to use Sedona in your environment. Do not use separate Sedona jars unless you are sure that you do not need shaded jars.
-	
+
 !!!warning
 	For R users, this is the only way to use Sedona in your environment.
 
+Apache Sedona provides different packages for each supported version of Spark.
+
+* For Spark 3.0 to 3.3, the artifacts to use should be `sedona-spark-shaded-3.0_2.12`, `sedona-viz-3.0_2.12`.
+* For Spark 3.4 or higher versions, please use the artifacts with the Spark major.minor version in the artifact name. For example, for Spark 3.4, the artifacts to use should be `sedona-spark-shaded-3.4_2.12`, `sedona-viz-3.4_2.12`.
+
+If you are using the Scala 2.13 builds of Spark, please use the corresponding packages for Scala 2.13, which are suffixed by `_2.13`.
+
 The optional GeoTools library is required if you want to use CRS transformation, ShapefileReader or GeoTiff reader. This wrapper library is a re-distribution of GeoTools official jars. The only purpose of this library is to bring GeoTools jars from OSGEO repository to Maven Central. This library is under GNU Lesser General Public License (LGPL) license so we cannot package it in Sedona official release.
 
 !!! abstract "Sedona with Apache Spark"
 
-	=== "Spark 3.0+ and Scala 2.12"
-	
+	=== "Spark 3.0 to 3.3 and Scala 2.12"
+
 		```xml
 		<dependency>
 		  <groupId>org.apache.sedona</groupId>
@@ -33,9 +40,31 @@ The optional GeoTools library is required if you want to use CRS transformation,
 		    <version>{{ sedona.current_geotools }}</version>
 		</dependency>
 		```
-	
-	=== "Spark 3.0 and Scala 2.13"
-	
+
+	=== "Spark 3.4+ and Scala 2.12"
+
+		```xml
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-spark-shaded-3.4_2.12</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-viz-3.4_2.12</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<!-- Optional: https://mvnrepository.com/artifact/org.datasyslab/geotools-wrapper -->
+		<dependency>
+		    <groupId>org.datasyslab</groupId>
+		    <artifactId>geotools-wrapper</artifactId>
+		    <version>{{ sedona.current_geotools }}</version>
+		</dependency>
+		```
+        If you are using Spark versions higher than 3.4, please replace the `3.4` in artifact names with the corresponding major.minor version numbers.
+
+	=== "Spark 3.0 to 3.3 and Scala 2.13"
+
 		```xml
 		<dependency>
 		  <groupId>org.apache.sedona</groupId>
@@ -55,11 +84,33 @@ The optional GeoTools library is required if you want to use CRS transformation,
 		</dependency>
 		```
 
+	=== "Spark 3.4+ and Scala 2.13"
+
+		```xml
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-spark-shaded-3.4_2.13</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-viz-3.4_2.13</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<!-- Optional: https://mvnrepository.com/artifact/org.datasyslab/geotools-wrapper -->
+		<dependency>
+		    <groupId>org.datasyslab</groupId>
+		    <artifactId>geotools-wrapper</artifactId>
+		    <version>{{ sedona.current_geotools }}</version>
+		</dependency>
+		```
+        If you are using Spark versions higher than 3.4, please replace the `3.4` in artifact names with the corresponding major.minor version numbers.
+
 
 !!! abstract "Sedona with Apache Flink"
 
 	=== "Flink 1.12+ and Scala 2.12"
-	
+
 		```xml
 		<dependency>
 		  <groupId>org.apache.sedona</groupId>
@@ -88,7 +139,7 @@ Under BSD 3-clause (compatible with Apache 2.0 license)
 	=== "Sedona 1.3.1+"
 
 		Add unidata repo to your POM.xml
-		
+
 		```
 		<repositories>
 		    <repository>
@@ -98,9 +149,9 @@ Under BSD 3-clause (compatible with Apache 2.0 license)
 		    </repository>
 		</repositories>
 		```
-		
+
 		Then add cdm-core to your POM dependency.
-		
+
 		```xml
 		<dependency>
 		    <groupId>edu.ucar</groupId>
@@ -110,7 +161,7 @@ Under BSD 3-clause (compatible with Apache 2.0 license)
 		```
 
 	=== "Before Sedona 1.3.1"
-	
+
 		```xml
 		<!-- https://mvnrepository.com/artifact/org.datasyslab/sernetcdf -->
 		<dependency>
@@ -126,12 +177,18 @@ Under BSD 3-clause (compatible with Apache 2.0 license)
 !!!warning
 	For Scala, Java, Python users, please use the following jars only if you satisfy these conditions: (1) you know how to exclude transient dependencies in a complex application. (2) your environment has internet access (3) you are using some sort of Maven package resolver, or pom.xml, or build.sbt. It usually directly takes an input like this `GroupID:ArtifactID:Version`. If you don't understand what we are talking about, the following jars are not for you.
 
+Apache Sedona provides different packages for each supported version of Spark.
+
+* For Spark 3.0 to 3.3, the artifacts to use should be `sedona-core-3.0_2.12`, `sedona-sql-3.0_2.12`, `sedona-viz-3.0_2.12`, `sedona-python-adapter-3.0_2.12`.
+* For Spark 3.4 or higher versions, please use the artifacts with the Spark major.minor version in the artifact name. For example, for Spark 3.4, the artifacts to use should be `sedona-core-3.4_2.12`, `sedona-sql-3.4_2.12`, `sedona-viz-3.4_2.12`, `sedona-python-adapter-3.4_2.12`.
+
+If you are using the Scala 2.13 builds of Spark, please use the corresponding packages for Scala 2.13, which are suffixed by `_2.13`.
+
 The optional GeoTools library is required if you want to use CRS transformation, ShapefileReader or GeoTiff reader. This wrapper library is a re-distribution of GeoTools official jars. The only purpose of this library is to bring GeoTools jars from OSGEO repository to Maven Central. This library is under GNU Lesser General Public License (LGPL) license so we cannot package it in Sedona official release.
 
 !!! abstract "Sedona with Apache Spark"
 
-	=== "Spark 3.0+ and Scala 2.12"
-	
+	=== "Spark 3.0 to 3.3 and Scala 2.12"
 		```xml
 		<dependency>
 		  <groupId>org.apache.sedona</groupId>
@@ -153,15 +210,44 @@ The optional GeoTools library is required if you want to use CRS transformation,
 		  <groupId>org.apache.sedona</groupId>
 		  <artifactId>sedona-python-adapter-3.0_2.12</artifactId>
 		  <version>{{ sedona.current_version }}</version>
-		</dependency>			
+		</dependency>
 		<dependency>
 		    <groupId>org.datasyslab</groupId>
 		    <artifactId>geotools-wrapper</artifactId>
 		    <version>{{ sedona.current_geotools }}</version>
-		</dependency>	
+		</dependency>
 		```
+	=== "Spark 3.4+ and Scala 2.12"
+		```xml
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-core-3.4_2.12</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-sql-3.4_2.12</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-viz-3.4_2.12</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<!-- Required if you use Sedona Python -->
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-python-adapter-3.4_2.12</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<dependency>
+		    <groupId>org.datasyslab</groupId>
+		    <artifactId>geotools-wrapper</artifactId>
+		    <version>{{ sedona.current_geotools }}</version>
+		</dependency>
+		```
+        If you are using Spark versions higher than 3.4, please replace the `3.4` in artifact names with the corresponding major.minor version numbers.
 	=== "Spark 3.0+ and Scala 2.13"
-	
 		```xml
 		<dependency>
 		  <groupId>org.apache.sedona</groupId>
@@ -183,20 +269,48 @@ The optional GeoTools library is required if you want to use CRS transformation,
 		  <groupId>org.apache.sedona</groupId>
 		  <artifactId>sedona-python-adapter-3.0_2.12</artifactId>
 		  <version>{{ sedona.current_version }}</version>
-		</dependency>	
+		</dependency>
+		<dependency>
+		    <groupId>org.datasyslab</groupId>
+		    <artifactId>geotools-wrapper</artifactId>
+		    <version>{{ sedona.current_geotools }}</version>
+		</dependency>
+		```
+	=== "Spark 3.4+ and Scala 2.13"
+		```xml
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-core-3.4_2.13</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-sql-3.4_2.13</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-viz-3.4_2.13</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
+		<!-- Required if you use Sedona Python -->
+		<dependency>
+		  <groupId>org.apache.sedona</groupId>
+		  <artifactId>sedona-python-adapter-3.4_2.13</artifactId>
+		  <version>{{ sedona.current_version }}</version>
+		</dependency>
 		<dependency>
 		    <groupId>org.datasyslab</groupId>
 		    <artifactId>geotools-wrapper</artifactId>
 		    <version>{{ sedona.current_geotools }}</version>
-		</dependency>		
+		</dependency>
 		```
-		
-	
+        If you are using Spark versions higher than 3.4, please replace the `3.4` in artifact names with the corresponding major.minor version numbers.
 
 !!! abstract "Sedona with Apache Flink"
 
 	=== "Flink 1.12+ and Scala 2.12"
-	
+
 		```xml
 		<dependency>
 		  <groupId>org.apache.sedona</groupId>
@@ -217,7 +331,7 @@ The optional GeoTools library is required if you want to use CRS transformation,
 		    <groupId>org.datasyslab</groupId>
 		    <artifactId>geotools-wrapper</artifactId>
 		    <version>{{ sedona.current_geotools }}</version>
-		</dependency>		
+		</dependency>
 		```
 
 
@@ -234,7 +348,7 @@ Under BSD 3-clause (compatible with Apache 2.0 license)
 	=== "Sedona 1.3.1+"
 
 		Add unidata repo to your POM.xml
-		
+
 		```
 		<repositories>
 		    <repository>
@@ -244,9 +358,9 @@ Under BSD 3-clause (compatible with Apache 2.0 license)
 		    </repository>
 		</repositories>
 		```
-		
+
 		Then add cdm-core to your POM dependency.
-		
+
 		```xml
 		<dependency>
 		    <groupId>edu.ucar</groupId>
@@ -256,7 +370,7 @@ Under BSD 3-clause (compatible with Apache 2.0 license)
 		```
 
 	=== "Before Sedona 1.3.1"
-	
+
 		```xml
 		<!-- https://mvnrepository.com/artifact/org.datasyslab/sernetcdf -->
 		<dependency>
diff --git a/docs/setup/platform.md b/docs/setup/platform.md
index bd54e8af..d1a4481b 100644
--- a/docs/setup/platform.md
+++ b/docs/setup/platform.md
@@ -5,25 +5,25 @@ Sedona binary releases are compiled by Java 1.8 and Scala 2.11/2.12 and tested i
 
 === "Sedona Scala/Java"
 	
-	|             | Spark 2.4 | Spark 3.0 | Spark 3.1 | Spark 3.2| Spark 3.3|
-	|:-----------:| :---------:|:---------:|:---------:|:---------:|:---------:|
-	| Scala 2.11  |  not tested  |  not tested  | not tested  | not tested  |not tested  |
-	| Scala 2.12 | not tested  |  ✅  | ✅ | ✅ |✅ |
-	| Scala 2.13 |  not tested  | not tested  | not tested  | not tested|✅ |
+	|             | Spark 2.4 | Spark 3.0 | Spark 3.1 | Spark 3.2| Spark 3.3| Spark 3.4|
+	|:-----------:| :---------:|:---------:|:---------:|:---------:|:---------:|:---------:|
+	| Scala 2.11  |  not tested  |  not tested  | not tested  | not tested  |not tested  |not tested |
+	| Scala 2.12 | not tested  |  ✅  | ✅ | ✅ |✅ |✅ |
+	| Scala 2.13 |  not tested  | not tested  | not tested  | not tested|✅ |✅ |
 
 === "Sedona Python"
 	
-	|             | Spark 2.4 (Scala 2.11) | Spark 3.0 (Scala 2.12)|Spark 3.1 (Scala 2.12)| Spark 3.2 (Scala 2.12)| Spark 3.3 (Scala 2.12)|
-	|:-----------:|:---------:|:---------:|:---------:|:---------:|:---------:|
-	| Python 3.7  |  not tested  |  ✅  |  ✅  |  ✅  |  ✅  |
-	| Python 3.8 | not tested  |  not tested  |not tested  |not tested  |  ✅  |
-	| Python 3.9 | not tested  |  not tested  |not tested  |not tested  |  ✅  |
-	| Python 3.10 | not tested  |  not tested  |not tested  |not tested  |  ✅  |
+	|             | Spark 2.4 (Scala 2.11) | Spark 3.0 (Scala 2.12)|Spark 3.1 (Scala 2.12)| Spark 3.2 (Scala 2.12)| Spark 3.3 (Scala 2.12)|Spark 3.4 (Scala 2.12)|
+	|:-----------:|:---------:|:---------:|:---------:|:---------:|:---------:|:---------:|
+	| Python 3.7  |  not tested  |  ✅  |  ✅  |  ✅  |  ✅  |  ✅  |
+	| Python 3.8 | not tested  |  not tested  |not tested  |not tested  |  ✅  |  ✅  |
+	| Python 3.9 | not tested  |  not tested  |not tested  |not tested  |  ✅  |  ✅  |
+	| Python 3.10 | not tested  |  not tested  |not tested  |not tested  |  ✅  |  ✅  |
 
 === "Sedona R"
 	
-	|             | Spark 2.4 | Spark 3.0 | Spark 3.1 | Spark 3.2 | Spark 3.3 | 
-	|:-----------:| :---------:|:---------:|:---------:|:---------:|:---------:|
-	| Scala 2.11  |  not tested  |  not tested  | not tested  | not tested  | not tested  |
-	| Scala 2.12 | not tested  |  ✅  | ✅ |  ✅ | ✅ | ✅ |
+	|             | Spark 2.4 | Spark 3.0 | Spark 3.1 | Spark 3.2 | Spark 3.3 | Spark 3.4 |
+	|:-----------:| :---------:|:---------:|:---------:|:---------:|:---------:|:---------:|
+	| Scala 2.11  |  not tested  |  not tested  | not tested  | not tested  | not tested  |not tested  |
+	| Scala 2.12 | not tested  |  ✅  | ✅ |  ✅ | ✅ | ✅ |
 
diff --git a/docs/tutorial/sql-pure-sql.md b/docs/tutorial/sql-pure-sql.md
index cbc1a8a8..56a30702 100644
--- a/docs/tutorial/sql-pure-sql.md
+++ b/docs/tutorial/sql-pure-sql.md
@@ -5,14 +5,28 @@ SedonaSQL supports SQL/MM Part3 Spatial SQL Standard. Detailed SedonaSQL APIs ar
 
 ## Initiate Session
 
-Start `spark-sql` as following (replace `<VERSION>` with actual version, like, `1.0.1-incubating`):
+Start `spark-sql` as follows (replace `<VERSION>` with the actual version, e.g. `1.0.1-incubating` or `{{ sedona.current_version }}`):
 
-```sh
-spark-sql --packages org.apache.sedona:sedona-spark-shaded-3.0_2.12:<VERSION>,org.apache.sedona:sedona-viz-3.0_2.12:<VERSION>,org.datasyslab:geotools-wrapper:geotools-24.0 \
-  --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
-  --conf spark.kryo.registrator=org.apache.sedona.viz.core.Serde.SedonaVizKryoRegistrator \
-  --conf spark.sql.extensions=org.apache.sedona.viz.sql.SedonaVizExtensions,org.apache.sedona.sql.SedonaSqlExtensions
-```
+!!! abstract "Run spark-sql with Apache Sedona"
+
+	=== "Spark 3.0 to 3.3 and Scala 2.12"
+
+        ```sh
+        spark-sql --packages org.apache.sedona:sedona-spark-shaded-3.0_2.12:<VERSION>,org.apache.sedona:sedona-viz-3.0_2.12:<VERSION>,org.datasyslab:geotools-wrapper:geotools-24.0 \
+          --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
+          --conf spark.kryo.registrator=org.apache.sedona.viz.core.Serde.SedonaVizKryoRegistrator \
+          --conf spark.sql.extensions=org.apache.sedona.viz.sql.SedonaVizExtensions,org.apache.sedona.sql.SedonaSqlExtensions
+        ```
+
+	=== "Spark 3.4+ and Scala 2.12"
+
+        ```sh
+        spark-sql --packages org.apache.sedona:sedona-spark-shaded-3.4_2.12:<VERSION>,org.apache.sedona:sedona-viz-3.4_2.12:<VERSION>,org.datasyslab:geotools-wrapper:geotools-24.0 \
+          --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
+          --conf spark.kryo.registrator=org.apache.sedona.viz.core.Serde.SedonaVizKryoRegistrator \
+          --conf spark.sql.extensions=org.apache.sedona.viz.sql.SedonaVizExtensions,org.apache.sedona.sql.SedonaSqlExtensions
+        ```
+        If you are using Spark versions higher than 3.4, please replace the `3.4` in artifact names with the corresponding major.minor version of Spark.
 
 
 This will register all User Defined Types, functions and optimizations in SedonaSQL and SedonaViz.
diff --git a/docs/tutorial/sql.md b/docs/tutorial/sql.md
index 4a0f60f5..6a929527 100644
--- a/docs/tutorial/sql.md
+++ b/docs/tutorial/sql.md
@@ -89,6 +89,7 @@ Use the following code to initiate your SparkSession at the beginning:
 	           'org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}'). \
 	    getOrCreate()
 	```
+    If you are using Spark >= 3.4, please replace the `3.0` in the `sedona-spark-shaded` package name with the corresponding major.minor version of Spark, e.g. `sedona-spark-shaded-3.4_2.12:{{ sedona.current_version }}`.
 
 !!!warning
 	Sedona has a suite of well-written geometry and index serializers. Forgetting to enable these serializers will lead to high memory consumption and slow performance.
@@ -630,4 +631,4 @@ case. Columns for the left and right user data must be provided.
 	  StructField("category", StringType, nullable = true)
 	))
 	val joinResultDf = Adapter.toDf(joinResultPairRDD, schema, sparkSession)
-	```
\ No newline at end of file
+	```
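
The same package substitution applies when launching an interactive shell. A sketch for Spark 3.4, following the `<VERSION>` placeholder convention of these docs and assuming the usual `SedonaKryoRegistrator` from `sedona-core`:

```bash
# Launch pyspark with the Spark 3.4 builds of the Sedona artifacts.
# Replace <VERSION> with the Sedona release in use; for Spark > 3.4,
# replace 3.4 with the matching major.minor version.
pyspark --packages org.apache.sedona:sedona-spark-shaded-3.4_2.12:<VERSION>,org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }} \
  --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
  --conf spark.kryo.registrator=org.apache.sedona.core.serde.SedonaKryoRegistrator \
  --conf spark.sql.extensions=org.apache.sedona.sql.SedonaSqlExtensions
```
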
diff --git a/flink-shaded/pom.xml b/flink-shaded/pom.xml
index af788c88..9fc14bc7 100644
--- a/flink-shaded/pom.xml
+++ b/flink-shaded/pom.xml
@@ -32,7 +32,7 @@
     <packaging>jar</packaging>
 
     <properties>
-        <maven.deploy.skip>false</maven.deploy.skip>
+        <maven.deploy.skip>${skip.deploy.common.modules}</maven.deploy.skip>
         <cdm.scope>compile</cdm.scope>
     </properties>
 
@@ -51,17 +51,6 @@
     <build>
         <sourceDirectory>src/main/scala</sourceDirectory>
         <plugins>
-            <plugin>
-                <groupId>io.paradoxical</groupId>
-                <artifactId>resolved-pom-maven-plugin</artifactId>
-                <version>1.0</version>
-                <executions>
-                    <execution>
-                        <id>resolve-my-pom</id>
-                        <phase>none</phase>
-                    </execution>
-                </executions>
-            </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-shade-plugin</artifactId>
diff --git a/flink/pom.xml b/flink/pom.xml
index 1fd07702..9c52f3b9 100644
--- a/flink/pom.xml
+++ b/flink/pom.xml
@@ -33,7 +33,7 @@
 	<packaging>jar</packaging>
 
     <properties>
-        <maven.deploy.skip>false</maven.deploy.skip>
+        <maven.deploy.skip>${skip.deploy.common.modules}</maven.deploy.skip>
         <flink.version>1.14.3</flink.version>
         <flink.scope>provided</flink.scope>
     </properties>
@@ -129,10 +129,13 @@
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
+            <version>${scala.version}</version>
         </dependency>
         <dependency>
             <groupId>org.scalatest</groupId>
             <artifactId>scalatest_${scala.compat.version}</artifactId>
+            <version>${scalatest.version}</version>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.logging.log4j</groupId>
diff --git a/pom.xml b/pom.xml
index 2e923650..a98c97df 100644
--- a/pom.xml
+++ b/pom.xml
@@ -63,7 +63,8 @@
         <java.version>1.8</java.version>
         <maven.compiler.source>${java.version}</maven.compiler.source>
         <maven.compiler.target>${java.version}</maven.compiler.target>
-        <maven.deploy.skip>false</maven.deploy.skip>
+        <skip.deploy.common.modules>false</skip.deploy.common.modules>
+        <maven.deploy.skip>${skip.deploy.common.modules}</maven.deploy.skip>
         <maven.compiler.plugin.version>3.10.1</maven.compiler.plugin.version>
 
         <cdm.version>5.4.2</cdm.version>
@@ -72,15 +73,18 @@
         <jackson.version>2.13.4</jackson.version>
         <jts.version>1.19.0</jts.version>
         <jts2geojson.version>0.16.1</jts2geojson.version>
-        <log4j.version>2.17.2</log4j.version>
-        <slf4j.version>1.7.36</slf4j.version>
+
+        <!-- Actual scala, spark and log4j versions will be set by activated profiles.
+             Setting default values helps IDEs that can't make sense of profiles. -->
+        <scala.compat.version>2.12</scala.compat.version>
         <spark.version>3.3.0</spark.version>
         <spark.compat.version>3.0</spark.compat.version>
-        <googles2.version>2.0.0</googles2.version>
+        <log4j.version>2.17.2</log4j.version>
 
-        <!-- Actual scala version will be set by a profile.
-        Setting a default value helps IDE:s that can't make sense of profiles. -->
-        <scala.compat.version>2.12</scala.compat.version>
+        <slf4j.version>1.7.36</slf4j.version>
+        <googles2.version>2.0.0</googles2.version>
+        <scalatest.version>3.1.1</scalatest.version>
+        <scala-collection-compat.version>2.5.0</scala-collection-compat.version>
 
         <geotools.scope>provided</geotools.scope>
         <!-- Because it's not in Maven central, make it provided by default -->
@@ -225,29 +229,6 @@
                 <version>${geotools.version}</version>
                 <scope>${geotools.scope}</scope>
             </dependency>
-            <dependency>
-                <groupId>org.apache.spark</groupId>
-                <artifactId>spark-core_${scala.compat.version}</artifactId>
-                <version>${spark.version}</version>
-                <scope>provided</scope>
-                <!-- Exclude log4j 1 for older versions of Spark-->
-                <exclusions>
-                    <exclusion>
-                        <groupId>log4j</groupId>
-                        <artifactId>log4j</artifactId>
-                    </exclusion>
-                    <exclusion>
-                        <groupId>org.slf4j</groupId>
-                        <artifactId>slf4j-log4j12</artifactId>
-                    </exclusion>
-                </exclusions>
-            </dependency>
-            <dependency>
-                <groupId>org.apache.spark</groupId>
-                <artifactId>spark-sql_${scala.compat.version}</artifactId>
-                <version>${spark.version}</version>
-                <scope>provided</scope>
-            </dependency>
            <dependency>
                 <groupId>org.apache.hadoop</groupId>
                 <artifactId>hadoop-client</artifactId>
@@ -298,21 +279,6 @@
                 <artifactId>commons-lang</artifactId>
                 <version>2.6</version>
             </dependency>
-            <dependency>
-                <groupId>org.scala-lang</groupId>
-                <artifactId>scala-library</artifactId>
-                <version>${scala.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>org.scala-lang</groupId>
-                <artifactId>scala-compiler</artifactId>
-                <version>${scala.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>org.scala-lang.modules</groupId>
-                <artifactId>scala-collection-compat_${scala.compat.version}</artifactId>
-                <version>2.5.0</version>
-            </dependency>
 
             <dependency>
                 <groupId>junit</groupId>
@@ -320,12 +286,6 @@
                 <version>4.13.1</version>
                 <scope>test</scope>
             </dependency>
-            <dependency>
-                <groupId>org.scalatest</groupId>
-                <artifactId>scalatest_${scala.compat.version}</artifactId>
-                <version>3.1.1</version>
-                <scope>test</scope>
-            </dependency>
             <dependency>
                 <groupId>com.google.geometry</groupId>
                 <artifactId>s2-geometry</artifactId>
@@ -522,6 +482,8 @@
                     <properties>
                         <spark.compat.version>${spark.compat.version}</spark.compat.version>
                         <scala.compat.version>${scala.compat.version}</scala.compat.version>
+                        <spark.version>${spark.version}</spark.version>
+                        <scala.version>${scala.version}</scala.version>
                     </properties>
                 </configuration>
                 <executions>
@@ -585,6 +547,38 @@
                 </repository>
             </repositories>
         </profile>
+        <profile>
+            <!-- This profile works for Spark 3.0, 3.1, 3.2 and 3.3 -->
+            <id>sedona-spark-3.0</id>
+            <activation>
+                <property>
+                    <name>spark</name>
+                    <value>3.0</value>
+                </property>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <properties>
+                <spark.version>3.3.0</spark.version>
+                <spark.compat.version>3.0</spark.compat.version>
+                <log4j.version>2.17.2</log4j.version>
+            </properties>
+        </profile>
+        <profile>
+            <id>sedona-spark-3.4</id>
+            <activation>
+                <property>
+                    <name>spark</name>
+                    <value>3.4</value>
+                </property>
+            </activation>
+            <properties>
+                <spark.version>3.4.0</spark.version>
+                <spark.compat.version>3.4</spark.compat.version>
+                <log4j.version>2.19.0</log4j.version>
+                <!-- Skip deploying common modules for the sedona-spark-3.4 profile; they will be deployed with sedona-spark-3.0 -->
+                <skip.deploy.common.modules>true</skip.deploy.common.modules>
+            </properties>
+        </profile>
         <profile>
             <id>scala2.13</id>
             <activation>
@@ -599,7 +593,7 @@
                 <scala.compat.version>2.13</scala.compat.version>
                 <scaladoc.arg>-no-java-comments</scaladoc.arg>
                 <!-- Skip deploying parent module for Scala 2.13 profile, it will be deployed with 2.12 -->
-                <maven.deploy.skip>true</maven.deploy.skip>
+                <skip.deploy.common.modules>true</skip.deploy.common.modules>
             </properties>
             <modules>
                 <module>common</module>
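
To check which of the new profiles a given `-Dspark` value activates, and what it resolves `spark.version` to, the stock maven-help-plugin works; a sketch:

```bash
# sedona-spark-3.4 should be listed; the default sedona-spark-3.0 should not.
mvn help:active-profiles -Dspark=3.4 -Dscala=2.12

# Print the spark.version the activated profile resolved.
mvn help:evaluate -Dspark=3.4 -Dexpression=spark.version -q -DforceStdout
```
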
diff --git a/python-adapter/pom.xml b/python-adapter/pom.xml
index c469cdb9..7582c761 100644
--- a/python-adapter/pom.xml
+++ b/python-adapter/pom.xml
@@ -61,17 +61,35 @@
        <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-core_${scala.compat.version}</artifactId>
+            <version>${spark.version}</version>
+            <scope>provided</scope>
             <exclusions>
                 <!-- Make sure Hadoop's guava version is used for older versions of Spark tests -->
                 <exclusion>
                     <groupId>com.google.guava</groupId>
                     <artifactId>guava</artifactId>
                 </exclusion>
+                <!-- Exclude log4j 1 for older versions of Spark-->
+                <exclusion>
+                    <groupId>log4j</groupId>
+                    <artifactId>log4j</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+                <!-- Exclude log4j-slf4j2-impl for Spark 3.4 -->
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-slf4j2-impl</artifactId>
+                </exclusion>
             </exclusions>
        </dependency>
        <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-sql_${scala.compat.version}</artifactId>
+            <version>${spark.version}</version>
+            <scope>provided</scope>
        </dependency>
        <dependency>
             <groupId>org.apache.hadoop</groupId>
@@ -96,14 +114,18 @@
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
+            <version>${scala.version}</version>
         </dependency>
         <dependency>
             <groupId>org.scala-lang.modules</groupId>
             <artifactId>scala-collection-compat_${scala.compat.version}</artifactId>
+            <version>${scala-collection-compat.version}</version>
         </dependency>
         <dependency>
             <groupId>org.scalatest</groupId>
             <artifactId>scalatest_${scala.compat.version}</artifactId>
+            <version>${scalatest.version}</version>
+            <scope>test</scope>
         </dependency>
     </dependencies>
     <build>
diff --git a/spark-shaded/pom.xml b/spark-shaded/pom.xml
index edac4373..ca6ffc4c 100644
--- a/spark-shaded/pom.xml
+++ b/spark-shaded/pom.xml
@@ -98,17 +98,6 @@
     <build>
         <sourceDirectory>src/main/scala</sourceDirectory>
         <plugins>
-            <plugin>
-                <groupId>io.paradoxical</groupId>
-                <artifactId>resolved-pom-maven-plugin</artifactId>
-                <version>1.0</version>
-                <executions>
-                    <execution>
-                        <id>resolve-my-pom</id>
-                        <phase>none</phase>
-                    </execution>
-                </executions>
-            </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-shade-plugin</artifactId>
diff --git a/sql/common/.gitignore b/sql/common/.gitignore
new file mode 100644
index 00000000..1cc6c4a1
--- /dev/null
+++ b/sql/common/.gitignore
@@ -0,0 +1,12 @@
+/target/
+/.settings/
+/.classpath
+/.project
+/dependency-reduced-pom.xml
+/doc/
+/.idea/
+*.iml
+/latest/
+/spark-warehouse/
+/metastore_db/
+*.log
diff --git a/sql/pom.xml b/sql/common/pom.xml
similarity index 89%
copy from sql/pom.xml
copy to sql/common/pom.xml
index 129a2121..5278c05e 100644
--- a/sql/pom.xml
+++ b/sql/common/pom.xml
@@ -21,14 +21,14 @@
 	<modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.apache.sedona</groupId>
-        <artifactId>sedona-parent</artifactId>
+        <artifactId>sedona-sql-parent-${spark.compat.version}_${scala.compat.version}</artifactId>
         <version>1.4.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
-	<artifactId>sedona-sql-${spark.compat.version}_${scala.compat.version}</artifactId>
+	<artifactId>sedona-sql-common-${spark.compat.version}_${scala.compat.version}</artifactId>
 
 	<name>${project.groupId}:${project.artifactId}</name>
-	<description>A cluster computing system for processing large-scale spatial data: SQL API.</description>
+	<description>A cluster computing system for processing large-scale spatial data: Common SQL API.</description>
     <url>http://sedona.apache.org/</url>
 	<packaging>jar</packaging>
 
@@ -57,13 +57,6 @@
         <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-core_${scala.compat.version}</artifactId>
-            <exclusions>
-                <!-- Make sure Hadoop's guava version is used for older versions of Spark tests -->
-                <exclusion>
-                    <groupId>com.google.guava</groupId>
-                    <artifactId>guava</artifactId>
-                </exclusion>
-            </exclusions>
         </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
@@ -128,8 +121,8 @@
             <artifactId>scalatest_${scala.compat.version}</artifactId>
         </dependency>
         <dependency>
-                <groupId>org.mockito</groupId>
-                <artifactId>mockito-inline</artifactId>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-inline</artifactId>
         </dependency>
     </dependencies>
 	<build>
diff --git a/sql/src/.gitignore b/sql/common/src/.gitignore
similarity index 100%
rename from sql/src/.gitignore
rename to sql/common/src/.gitignore
diff --git a/sql/src/main/.gitignore b/sql/common/src/main/.gitignore
similarity index 100%
rename from sql/src/main/.gitignore
rename to sql/common/src/main/.gitignore
diff --git a/sql/src/main/scala/org/apache/sedona/sql/SedonaSqlExtensions.scala b/sql/common/src/main/scala/org/apache/sedona/sql/SedonaSqlExtensions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/SedonaSqlExtensions.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/SedonaSqlExtensions.scala
diff --git a/sql/src/main/scala/org/apache/sedona/sql/UDF/Catalog.scala b/sql/common/src/main/scala/org/apache/sedona/sql/UDF/Catalog.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/UDF/Catalog.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/UDF/Catalog.scala
diff --git a/sql/src/main/scala/org/apache/sedona/sql/UDF/UdfRegistrator.scala b/sql/common/src/main/scala/org/apache/sedona/sql/UDF/UdfRegistrator.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/UDF/UdfRegistrator.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/UDF/UdfRegistrator.scala
diff --git a/sql/src/main/scala/org/apache/sedona/sql/UDT/UdtRegistrator.scala b/sql/common/src/main/scala/org/apache/sedona/sql/UDT/UdtRegistrator.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/UDT/UdtRegistrator.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/UDT/UdtRegistrator.scala
diff --git a/sql/src/main/scala/org/apache/sedona/sql/utils/Adapter.scala b/sql/common/src/main/scala/org/apache/sedona/sql/utils/Adapter.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/utils/Adapter.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/utils/Adapter.scala
diff --git a/sql/src/main/scala/org/apache/sedona/sql/utils/GeometrySerializer.scala b/sql/common/src/main/scala/org/apache/sedona/sql/utils/GeometrySerializer.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/utils/GeometrySerializer.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/utils/GeometrySerializer.scala
diff --git a/sql/src/main/scala/org/apache/sedona/sql/utils/IndexSerializer.scala b/sql/common/src/main/scala/org/apache/sedona/sql/utils/IndexSerializer.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/utils/IndexSerializer.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/utils/IndexSerializer.scala
diff --git a/sql/src/main/scala/org/apache/sedona/sql/utils/SedonaSQLRegistrator.scala b/sql/common/src/main/scala/org/apache/sedona/sql/utils/SedonaSQLRegistrator.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/sedona/sql/utils/SedonaSQLRegistrator.scala
rename to sql/common/src/main/scala/org/apache/sedona/sql/utils/SedonaSQLRegistrator.scala
diff --git a/sql/common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormatBase.scala b/sql/common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormatBase.scala
new file mode 100644
index 00000000..69c98624
--- /dev/null
+++ b/sql/common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormatBase.scala
@@ -0,0 +1,33 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet
+
+import org.apache.spark.sql.execution.datasources.FileFormat
+import org.apache.spark.sql.sources.DataSourceRegister
+
+/**
+  * Base trait of GeoParquetFileFormat. It defines the method used for spatial predicate push down. The actual
+  * implementations of GeoParquetFileFormat live in the sql/spark-3.x submodules for specific Spark minor versions.
+  */
+trait GeoParquetFileFormatBase extends FileFormat with DataSourceRegister {
+
+  override def shortName(): String = "geoparquet"
+
+  /**
+    * Create a new GeoParquetFileFormat object with the specified spatialFilter
+    * @param spatialFilter the spatial filter pushed down to GeoParquetFileFormat
+    * @return a new GeoParquetFileFormat object
+    */
+  def withSpatialPredicates(spatialFilter: GeoParquetSpatialFilter): GeoParquetFileFormatBase
+}
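
For orientation, the likely shape of a per-Spark-version implementation of
this trait (abbreviated; the actual class declaration appears later in this
diff under sql/spark-3.0, and the withSpatialPredicates body shown here is an
assumption about the natural copy-on-write implementation):

    class GeoParquetFileFormat(val spatialFilter: Option[GeoParquetSpatialFilter])
      extends ParquetFileFormat with GeoParquetFileFormatBase {

      def this() = this(None)

      // Returning a copy carrying the pushed-down filter keeps the format
      // immutable, so the optimizer rule can swap it into a new relation.
      override def withSpatialPredicates(spatialFilter: GeoParquetSpatialFilter): GeoParquetFileFormatBase =
        new GeoParquetFileFormat(Some(spatialFilter))
    }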
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetMetaData.scala b/sql/common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetMetaData.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetMetaData.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetMetaData.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSpatialFilter.scala b/sql/common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSpatialFilter.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSpatialFilter.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSpatialFilter.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/GeometryUDT.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/GeometryUDT.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/GeometryUDT.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/GeometryUDT.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/IndexUDT.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/IndexUDT.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/IndexUDT.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/IndexUDT.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/RasterUDT.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/RasterUDT.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/RasterUDT.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/RasterUDT.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/UdtRegistratorWrapper.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/UdtRegistratorWrapper.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/UdtRegistratorWrapper.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/UDT/UdtRegistratorWrapper.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/execution/SedonaSparkPlan.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/execution/SedonaSparkPlan.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/execution/SedonaSparkPlan.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/execution/SedonaSparkPlan.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/AggregateFunctions.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/AggregateFunctions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/AggregateFunctions.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/AggregateFunctions.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Constructors.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Constructors.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Constructors.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Constructors.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/DataFrameAPI.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/DataFrameAPI.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/DataFrameAPI.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/DataFrameAPI.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Functions.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Functions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Functions.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Functions.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/NullSafeExpressions.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/NullSafeExpressions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/NullSafeExpressions.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/NullSafeExpressions.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Predicates.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Predicates.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Predicates.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/Predicates.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/SerdeAware.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/SerdeAware.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/SerdeAware.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/SerdeAware.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/collect/ST_Collect.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/collect/ST_Collect.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/collect/ST_Collect.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/collect/ST_Collect.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/geohash/GeoHashDecoder.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/geohash/GeoHashDecoder.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/geohash/GeoHashDecoder.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/geohash/GeoHashDecoder.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/implicits.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/implicits.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/implicits.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/implicits.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Constructors.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Constructors.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Constructors.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Constructors.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Functions.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Functions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Functions.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/Functions.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/IO.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/IO.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/IO.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/IO.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/implicits.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/implicits.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/implicits.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/raster/implicits.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_aggregates.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_aggregates.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_aggregates.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_aggregates.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_constructors.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_constructors.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_constructors.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_constructors.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_functions.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_functions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_functions.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_functions.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_predicates.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_predicates.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_predicates.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/expressions/st_predicates.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffSchema.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffSchema.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffSchema.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffSchema.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/HadoopUtils.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/HadoopUtils.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/HadoopUtils.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/HadoopUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageReadOptions.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageReadOptions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageReadOptions.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageReadOptions.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageWriteOptions.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageWriteOptions.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageWriteOptions.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/io/ImageWriteOptions.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/ExpressionUtils.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/ExpressionUtils.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/ExpressionUtils.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/ExpressionUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/SpatialFilterPushDownForGeoParquet.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/SpatialFilterPushDownForGeoParquet.scala
similarity index 99%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/SpatialFilterPushDownForGeoParquet.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/SpatialFilterPushDownForGeoParquet.scala
index 2f8134ea..3bd38801 100644
--- a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/SpatialFilterPushDownForGeoParquet.scala
+++ b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/optimization/SpatialFilterPushDownForGeoParquet.scala
@@ -39,7 +39,7 @@ import org.apache.spark.sql.execution.datasources.HadoopFsRelation
 import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.execution.datasources.PushableColumn
 import org.apache.spark.sql.execution.datasources.PushableColumnBase
-import org.apache.spark.sql.execution.datasources.parquet.GeoParquetFileFormat
+import org.apache.spark.sql.execution.datasources.parquet.GeoParquetFileFormatBase
 import org.apache.spark.sql.execution.datasources.parquet.GeoParquetSpatialFilter
 import org.apache.spark.sql.execution.datasources.parquet.GeoParquetSpatialFilter.AndFilter
 import org.apache.spark.sql.execution.datasources.parquet.GeoParquetSpatialFilter.LeafFilter
@@ -70,7 +70,7 @@ class SpatialFilterPushDownForGeoParquet(sparkSession: SparkSession) extends Rul
       val (_, normalizedFiltersWithoutSubquery) = normalizedFilters.partition(SubqueryExpression.hasSubquery)
       val geoParquetSpatialFilters = translateToGeoParquetSpatialFilters(normalizedFiltersWithoutSubquery)
       val hadoopFsRelation = lr.relation.asInstanceOf[HadoopFsRelation]
-      val fileFormat = hadoopFsRelation.fileFormat.asInstanceOf[GeoParquetFileFormat]
+      val fileFormat = hadoopFsRelation.fileFormat.asInstanceOf[GeoParquetFileFormatBase]
       if (geoParquetSpatialFilters.isEmpty) filter else {
         val combinedSpatialFilter = geoParquetSpatialFilters.reduce(AndFilter)
         val newFileFormat = fileFormat.withSpatialPredicates(combinedSpatialFilter)
@@ -81,7 +81,7 @@ class SpatialFilterPushDownForGeoParquet(sparkSession: SparkSession) extends Rul
 
   private def isGeoParquetRelation(lr: LogicalRelation): Boolean =
     lr.relation.isInstanceOf[HadoopFsRelation] &&
-      lr.relation.asInstanceOf[HadoopFsRelation].fileFormat.isInstanceOf[GeoParquetFileFormat]
+      lr.relation.asInstanceOf[HadoopFsRelation].fileFormat.isInstanceOf[GeoParquetFileFormatBase]
 
   private def translateToGeoParquetSpatialFilters(predicates: Seq[Expression]): Seq[GeoParquetSpatialFilter] = {
     val pushableColumn = PushableColumn(nestedPredicatePushdownEnabled = false)
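
The switch from the concrete class to the trait is what lets this one rule in
sql/common serve every Spark-version-specific GeoParquet format. A sketch of
the dispatch, assuming a HadoopFsRelation `relation` and a non-empty Seq
`spatialFilters` of translated GeoParquetSpatialFilters, as in the rule above:

    relation.fileFormat match {
      case geo: GeoParquetFileFormatBase =>
        // A GeoParquet relation from any sql/spark-3.x module: AND the
        // translated filters together and push them into the format.
        geo.withSpatialPredicates(spatialFilters.reduce(AndFilter))
      case _ =>
        relation.fileFormat // not GeoParquet; leave the plan untouched
    }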
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/BroadcastIndexJoinExec.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/BroadcastIndexJoinExec.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/BroadcastIndexJoinExec.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/BroadcastIndexJoinExec.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/DistanceJoinExec.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/DistanceJoinExec.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/DistanceJoinExec.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/DistanceJoinExec.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/JoinQueryDetector.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/JoinQueryDetector.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/JoinQueryDetector.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/JoinQueryDetector.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/RangeJoinExec.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/RangeJoinExec.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/RangeJoinExec.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/RangeJoinExec.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/SpatialIndexExec.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/SpatialIndexExec.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/SpatialIndexExec.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/SpatialIndexExec.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryBase.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryBase.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryBase.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryBase.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryExec.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryExec.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryExec.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/TraitJoinQueryExec.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/enums.scala b/sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/enums.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/enums.scala
rename to sql/common/src/main/scala/org/apache/spark/sql/sedona_sql/strategy/join/enums.scala
diff --git a/sql/src/test/.gitignore b/sql/common/src/test/.gitignore
similarity index 100%
rename from sql/src/test/.gitignore
rename to sql/common/src/test/.gitignore
diff --git a/sql/src/test/java/org/apache/sedona/sql/adapterTestJava.java b/sql/common/src/test/java/org/apache/sedona/sql/adapterTestJava.java
similarity index 100%
rename from sql/src/test/java/org/apache/sedona/sql/adapterTestJava.java
rename to sql/common/src/test/java/org/apache/sedona/sql/adapterTestJava.java
diff --git a/sql/src/test/resources/log4j2.properties b/sql/common/src/test/resources/log4j2.properties
similarity index 100%
copy from sql/src/test/resources/log4j2.properties
copy to sql/common/src/test/resources/log4j2.properties
diff --git a/sql/src/test/scala/org/apache/sedona/sql/BroadcastIndexJoinSuite.scala b/sql/common/src/test/scala/org/apache/sedona/sql/BroadcastIndexJoinSuite.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/BroadcastIndexJoinSuite.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/BroadcastIndexJoinSuite.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/DeduplicationSuite.scala b/sql/common/src/test/scala/org/apache/sedona/sql/DeduplicationSuite.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/DeduplicationSuite.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/DeduplicationSuite.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/GeometrySample.scala b/sql/common/src/test/scala/org/apache/sedona/sql/GeometrySample.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/GeometrySample.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/GeometrySample.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/GeometryUdtTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/GeometryUdtTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/GeometryUdtTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/GeometryUdtTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/SpatialJoinSuite.scala b/sql/common/src/test/scala/org/apache/sedona/sql/SpatialJoinSuite.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/SpatialJoinSuite.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/SpatialJoinSuite.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
similarity index 98%
rename from sql/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
index 0885ebf7..47c72e7c 100644
--- a/sql/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
+++ b/sql/common/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
@@ -41,7 +41,7 @@ trait TestBaseScala extends FunSpec with BeforeAndAfterAll {
     .config("sedona.join.autoBroadcastJoinThreshold", "-1")
     .getOrCreate()
 
-  val resourceFolder = System.getProperty("user.dir") + "/../core/src/test/resources/"
+  val resourceFolder = System.getProperty("user.dir") + "/../../core/src/test/resources/"
   val mixedWkbGeometryInputLocation = resourceFolder + "county_small_wkb.tsv"
   val mixedWktGeometryInputLocation = resourceFolder + "county_small.tsv"
   val shapefileInputLocation = resourceFolder + "shapefiles/dbf"
diff --git a/sql/src/test/scala/org/apache/sedona/sql/adapterTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/adapterTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/adapterTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/adapterTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/aggregateFunctionTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/aggregateFunctionTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/aggregateFunctionTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/aggregateFunctionTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/constructorTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/constructorTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/constructorTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/constructorTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/dataFrameAPITestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/dataFrameAPITestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/dataFrameAPITestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/dataFrameAPITestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functionTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functionTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functionTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functionTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/FunctionsHelper.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/FunctionsHelper.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/FunctionsHelper.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/FunctionsHelper.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/STS2CellIDs.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/STS2CellIDs.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/STS2CellIDs.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/STS2CellIDs.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/StMakePolygonSpec.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/StMakePolygonSpec.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/StMakePolygonSpec.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/StMakePolygonSpec.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/TestGeometrySimplify.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/TestGeometrySimplify.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/TestGeometrySimplify.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/TestGeometrySimplify.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/TestStSubDivide.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/TestStSubDivide.scala
similarity index 99%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/TestStSubDivide.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/TestStSubDivide.scala
index 10d4dd5c..9dd18632 100644
--- a/sql/src/test/scala/org/apache/sedona/sql/functions/TestStSubDivide.scala
+++ b/sql/common/src/test/scala/org/apache/sedona/sql/functions/TestStSubDivide.scala
@@ -47,7 +47,7 @@ class TestStSubDivide extends AnyFunSuite with Matchers with TableDrivenProperty
 
   object Fixtures {
 
-    val resourceFolder: String = System.getProperty("user.dir") + "/../core/src/test/resources/"
+    val resourceFolder: String = System.getProperty("user.dir") + "/../../core/src/test/resources/"
 
     private val geometries: Seq[(String, String, Int, Seq[String])] = {
 
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/collect/TestStCollect.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/collect/TestStCollect.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/collect/TestStCollect.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/collect/TestStCollect.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/geohash/Fixtures.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/Fixtures.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/geohash/Fixtures.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/Fixtures.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestCalculatingGeoHash.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestCalculatingGeoHash.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestCalculatingGeoHash.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestCalculatingGeoHash.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestGeoHashDecoder.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestGeoHashDecoder.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestGeoHashDecoder.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestGeoHashDecoder.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeoHash.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeoHash.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeoHash.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeoHash.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeometryFromGeoHash.scala b/sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeometryFromGeoHash.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeometryFromGeoHash.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/functions/geohash/TestStGeometryFromGeoHash.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/implicits.scala b/sql/common/src/test/scala/org/apache/sedona/sql/implicits.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/implicits.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/implicits.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/ogc/GeometryTypesAndFunctionsTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/ogc/GeometryTypesAndFunctionsTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/ogc/GeometryTypesAndFunctionsTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/ogc/GeometryTypesAndFunctionsTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/ogc/package-info.java b/sql/common/src/test/scala/org/apache/sedona/sql/ogc/package-info.java
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/ogc/package-info.java
rename to sql/common/src/test/scala/org/apache/sedona/sql/ogc/package-info.java
diff --git a/sql/src/test/scala/org/apache/sedona/sql/predicateJoinTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/predicateJoinTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/predicateJoinTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/predicateJoinTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/predicateTestScala.scala b/sql/common/src/test/scala/org/apache/sedona/sql/predicateTestScala.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/predicateTestScala.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/predicateTestScala.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/rasteralgebraTest.scala b/sql/common/src/test/scala/org/apache/sedona/sql/rasteralgebraTest.scala
similarity index 93%
rename from sql/src/test/scala/org/apache/sedona/sql/rasteralgebraTest.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/rasteralgebraTest.scala
index 7dcc678d..f3854295 100644
--- a/sql/src/test/scala/org/apache/sedona/sql/rasteralgebraTest.scala
+++ b/sql/common/src/test/scala/org/apache/sedona/sql/rasteralgebraTest.scala
@@ -210,23 +210,9 @@ class rasteralgebraTest extends TestBaseScala with BeforeAndAfter with GivenWhen
   }
 
   describe("Should pass all transformation tests") {
-    it("Passed RS_Append for new data length") {
-      var df = sparkSession.read.format("geotiff").option("dropInvalid", true).load(resourceFolder + "raster/test3.tif")
-      df = df.selectExpr(" image.data as data", "image.nBands as nBands")
-      val rowFirst = df.first()
-      val nBands = rowFirst.getAs[Int](1)
-      val lengthInitial = rowFirst.getAs[mutable.WrappedArray[Double]](0).length
-      val lengthBand = lengthInitial / nBands
-
-      df = df.selectExpr("data", "nBands", "RS_GetBand(data, 1, nBands) as band1", "RS_GetBand(data, 2, nBands) as band2")
-      df = df.selectExpr("data", "nBands", "RS_NormalizedDifference(band2, band1) as normalizedDifference")
-      df = df.selectExpr("RS_Append(data, normalizedDifference, nBands) as targetData")
-      assert(df.first().getAs[mutable.WrappedArray[Double]](0).length == lengthInitial + lengthBand)
-    }
-
-    it("Passed RS_Append for new band elements") {
-      var df = sparkSession.read.format("geotiff").option("dropInvalid", true).load(resourceFolder + "raster/test3.tif")
-      df = df.selectExpr(" image.data as data", "image.nBands as nBands")
+    it("Passed RS_Append for new data length and new band elements") {
+      var df = Seq(Seq(200.0, 400.0, 600.0, 800.0, 100.0, 500.0, 800.0, 600.0)).toDF("data")
+      df = df.selectExpr("data", "2 as nBands")
       var rowFirst = df.first()
       val nBands = rowFirst.getAs[Int](1)
       val lengthInitial = rowFirst.getAs[mutable.WrappedArray[Double]](0).length
@@ -237,8 +223,9 @@ class rasteralgebraTest extends TestBaseScala with BeforeAndAfter with GivenWhen
       df = df.selectExpr("RS_Append(data, normalizedDifference, nBands) as targetData")
 
       rowFirst = df.first()
-      assert((rowFirst.getAs[mutable.WrappedArray[Double]](0)(lengthInitial) == 0.13) &&
-        (rowFirst.getAs[mutable.WrappedArray[Double]](0)(lengthInitial + lengthBand - 1) == 0.03))
+      assert(rowFirst.getAs[mutable.WrappedArray[Double]](0).length == lengthInitial + lengthBand)
+      assert((rowFirst.getAs[mutable.WrappedArray[Double]](0)(lengthInitial) == 0.33) &&
+        (rowFirst.getAs[mutable.WrappedArray[Double]](0)(lengthInitial + lengthBand - 1) == 0.14))
     }
   }
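
The rewritten test's expected values can be checked by hand. With the
in-memory fixture, band1 is the first half of `data` and band2 the second
half, and the asserted 0.33 and 0.14 follow if RS_NormalizedDifference(x, y)
computes (y - x) / (y + x) per pixel rounded to two decimals (an assumption,
but one consistent with both assertions):

    val band1 = Seq(200.0, 400.0, 600.0, 800.0)
    val band2 = Seq(100.0, 500.0, 800.0, 600.0)
    val nd = band1.zip(band2).map { case (b1, b2) =>
      BigDecimal((b1 - b2) / (b1 + b2))
        .setScale(2, BigDecimal.RoundingMode.HALF_UP)
        .toDouble
    }
    // nd == Seq(0.33, -0.11, -0.14, 0.14); the appended band's first element
    // (0.33) and last element (0.14) match the assertions above.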
 
diff --git a/sql/src/test/scala/org/apache/sedona/sql/serdeAwareTest.scala b/sql/common/src/test/scala/org/apache/sedona/sql/serdeAwareTest.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/serdeAwareTest.scala
rename to sql/common/src/test/scala/org/apache/sedona/sql/serdeAwareTest.scala
diff --git a/sql/pom.xml b/sql/pom.xml
index 129a2121..beabf121 100644
--- a/sql/pom.xml
+++ b/sql/pom.xml
@@ -25,128 +25,88 @@
         <version>1.4.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
-	<artifactId>sedona-sql-${spark.compat.version}_${scala.compat.version}</artifactId>
+	<artifactId>sedona-sql-parent-${spark.compat.version}_${scala.compat.version}</artifactId>
 
 	<name>${project.groupId}:${project.artifactId}</name>
-	<description>A cluster computing system for processing large-scale spatial data: SQL API.</description>
+	<description>A cluster computing system for processing large-scale spatial data: Parent module for SQL API.</description>
     <url>http://sedona.apache.org/</url>
-	<packaging>jar</packaging>
+	<packaging>pom</packaging>
 
     <properties>
         <maven.deploy.skip>false</maven.deploy.skip>
     </properties>
 
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.sedona</groupId>
-            <artifactId>sedona-common</artifactId>
-            <version>${project.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.fasterxml.jackson.core</groupId>
-                    <artifactId>*</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.sedona</groupId>
-            <artifactId>sedona-core-${spark.compat.version}_${scala.compat.version}</artifactId>
-            <version>${project.version}</version>
-        </dependency>
+    <modules>
+        <module>common</module>
+        <module>spark-${spark.compat.version}</module>
+    </modules>
 
-        <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-core_${scala.compat.version}</artifactId>
-            <exclusions>
-                <!-- Make sure Hadoop's guava version is used for older versions of Spark tests -->
-                <exclusion>
-                    <groupId>com.google.guava</groupId>
-                    <artifactId>guava</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-sql_${scala.compat.version}</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.logging.log4j</groupId>
-            <artifactId>log4j-1.2-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.geotools</groupId>
-            <artifactId>gt-main</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.geotools</groupId>
-            <artifactId>gt-referencing</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.geotools</groupId>
-            <artifactId>gt-epsg-hsql</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.geotools</groupId>
-            <artifactId>gt-geotiff</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.geotools</groupId>
-            <artifactId>gt-coverage</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.geotools</groupId>
-            <artifactId>gt-arcgrid</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.locationtech.jts</groupId>
-            <artifactId>jts-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.wololo</groupId>
-            <artifactId>jts2geojson</artifactId>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.fasterxml.jackson.core</groupId>
-                    <artifactId>*</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-library</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.scala-lang.modules</groupId>
-            <artifactId>scala-collection-compat_${scala.compat.version}</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.scalatest</groupId>
-            <artifactId>scalatest_${scala.compat.version}</artifactId>
-        </dependency>
-        <dependency>
-                <groupId>org.mockito</groupId>
-                <artifactId>mockito-inline</artifactId>
-        </dependency>
-    </dependencies>
-	<build>
-        <sourceDirectory>src/main/scala</sourceDirectory>
-        <plugins>
-            <plugin>
-                <groupId>net.alchim31.maven</groupId>
-                <artifactId>scala-maven-plugin</artifactId>
-            </plugin>
-            <plugin>
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.apache.spark</groupId>
+                <artifactId>spark-core_${scala.compat.version}</artifactId>
+                <version>${spark.version}</version>
+                <scope>provided</scope>
+                <exclusions>
+                    <!-- Make sure Hadoop's guava version is used for tests against older versions of Spark -->
+                    <exclusion>
+                        <groupId>com.google.guava</groupId>
+                        <artifactId>guava</artifactId>
+                    </exclusion>
+                    <!-- Exclude log4j 1 for older versions of Spark -->
+                    <exclusion>
+                        <groupId>log4j</groupId>
+                        <artifactId>log4j</artifactId>
+                    </exclusion>
+                    <exclusion>
+                        <groupId>org.slf4j</groupId>
+                        <artifactId>slf4j-log4j12</artifactId>
+                    </exclusion>
+                    <!-- Exclude log4j-slf4j2-impl for Spark 3.4 -->
+                    <exclusion>
+                        <groupId>org.apache.logging.log4j</groupId>
+                        <artifactId>log4j-slf4j2-impl</artifactId>
+                    </exclusion>
+                </exclusions>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.spark</groupId>
+                <artifactId>spark-sql_${scala.compat.version}</artifactId>
+                <version>${spark.version}</version>
+                <scope>provided</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.scala-lang</groupId>
+                <artifactId>scala-library</artifactId>
+                <version>${scala.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.scala-lang.modules</groupId>
+                <artifactId>scala-collection-compat_${scala.compat.version}</artifactId>
+                <version>${scala-collection-compat.version}</version>
+            </dependency>
+            <dependency>
                 <groupId>org.scalatest</groupId>
-                <artifactId>scalatest-maven-plugin</artifactId>
-            </plugin>
-            <plugin>
-                <groupId>org.scalastyle</groupId>
-                <artifactId>scalastyle-maven-plugin</artifactId>
-            </plugin>
-        </plugins>
-	</build>
+                <artifactId>scalatest_${scala.compat.version}</artifactId>
+                <version>${scalatest.version}</version>
+                <scope>test</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <build>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <groupId>org.scalastyle</groupId>
+                    <artifactId>scalastyle-maven-plugin</artifactId>
+                    <configuration>
+                        <configLocation>${project.basedir}/../../scalastyle_config.xml</configLocation>
+                    </configuration>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+    </build>
+
 </project>
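
Net effect of this pom restructuring, instantiated for one build profile
(e.g. Spark 3.0 / Scala 2.12; derived from the module list and the renames in
this diff):

    sql/                 sedona-sql-parent-3.0_2.12  (packaging: pom)
      common/            sedona-sql-common-3.0_2.12  (version-independent SQL API)
      spark-3.0/         sedona-sql-3.0_2.12         (SQL API for Spark 3.0 - 3.3)

The shared Spark/Scala dependencies move into <dependencyManagement> so both
child modules inherit the same versions, scopes, and log4j exclusions without
repeating them.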
diff --git a/sql/spark-3.0/.gitignore b/sql/spark-3.0/.gitignore
new file mode 100644
index 00000000..1cc6c4a1
--- /dev/null
+++ b/sql/spark-3.0/.gitignore
@@ -0,0 +1,12 @@
+/target/
+/.settings/
+/.classpath
+/.project
+/dependency-reduced-pom.xml
+/doc/
+/.idea/
+*.iml
+/latest/
+/spark-warehouse/
+/metastore_db/
+*.log
diff --git a/sql/pom.xml b/sql/spark-3.0/pom.xml
similarity index 91%
copy from sql/pom.xml
copy to sql/spark-3.0/pom.xml
index 129a2121..39d51a2a 100644
--- a/sql/pom.xml
+++ b/sql/spark-3.0/pom.xml
@@ -21,14 +21,14 @@
 	<modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.apache.sedona</groupId>
-        <artifactId>sedona-parent</artifactId>
+        <artifactId>sedona-sql-parent-${spark.compat.version}_${scala.compat.version}</artifactId>
         <version>1.4.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 	<artifactId>sedona-sql-${spark.compat.version}_${scala.compat.version}</artifactId>
 
 	<name>${project.groupId}:${project.artifactId}</name>
-	<description>A cluster computing system for processing large-scale spatial data: SQL API.</description>
+	<description>A cluster computing system for processing large-scale spatial data: SQL API for Spark 3.0 - 3.3.</description>
     <url>http://sedona.apache.org/</url>
 	<packaging>jar</packaging>
 
@@ -53,17 +53,15 @@
             <artifactId>sedona-core-${spark.compat.version}_${scala.compat.version}</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.apache.sedona</groupId>
+            <artifactId>sedona-sql-common-${spark.compat.version}_${scala.compat.version}</artifactId>
+            <version>${project.version}</version>
+        </dependency>
 
         <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-core_${scala.compat.version}</artifactId>
-            <exclusions>
-                <!-- Make sure Hadoop's guava version is used for older versions of Spark tests -->
-                <exclusion>
-                    <groupId>com.google.guava</groupId>
-                    <artifactId>guava</artifactId>
-                </exclusion>
-            </exclusions>
         </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
@@ -128,8 +126,8 @@
             <artifactId>scalatest_${scala.compat.version}</artifactId>
         </dependency>
         <dependency>
-                <groupId>org.mockito</groupId>
-                <artifactId>mockito-inline</artifactId>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-inline</artifactId>
         </dependency>
     </dependencies>
 	<build>
diff --git a/sql/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister b/sql/spark-3.0/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
similarity index 100%
copy from sql/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
copy to sql/spark-3.0/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala
similarity index 100%
copy from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDateTimeUtils.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDateTimeUtils.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDateTimeUtils.scala
rename to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDateTimeUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
similarity index 99%
copy from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
index 279efbbd..7f534e35 100644
--- a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
+++ b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
@@ -46,12 +46,10 @@ import scala.util.Failure
 import scala.util.Try
 
 class GeoParquetFileFormat(val spatialFilter: Option[GeoParquetSpatialFilter])
-  extends ParquetFileFormat with FileFormat with DataSourceRegister with Logging with Serializable {
+  extends ParquetFileFormat with GeoParquetFileFormatBase with FileFormat with DataSourceRegister with Logging with Serializable {
 
   def this() = this(None)
 
-  override def shortName(): String = "geoparquet"
-
   override def equals(other: Any): Boolean = other.isInstanceOf[GeoParquetFileFormat] &&
     other.asInstanceOf[GeoParquetFileFormat].spatialFilter == spatialFilter
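
With shortName() now inherited from GeoParquetFileFormatBase, the data source
keeps registering under the same "geoparquet" name, so existing reads are
unaffected. A usage sketch (standard Spark read API; the path is
illustrative):

    val df = spark.read.format("geoparquet").load("/path/to/geoparquet-dir")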
 
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFilters.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFilters.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFilters.scala
rename to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFilters.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetReadSupport.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetReadSupport.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetReadSupport.scala
rename to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetReadSupport.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala
similarity index 100%
copy from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
similarity index 100%
copy from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala
similarity index 100%
copy from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala
similarity index 100%
copy from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
similarity index 100%
copy from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoSchemaMergeUtils.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoSchemaMergeUtils.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoSchemaMergeUtils.scala
rename to sql/spark-3.0/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoSchemaMergeUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala b/sql/spark-3.0/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
similarity index 100%
copy from sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
copy to sql/spark-3.0/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
diff --git a/sql/src/test/resources/log4j2.properties b/sql/spark-3.0/src/test/resources/log4j2.properties
similarity index 100%
copy from sql/src/test/resources/log4j2.properties
copy to sql/spark-3.0/src/test/resources/log4j2.properties
diff --git a/sql/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala b/sql/spark-3.0/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala
similarity index 100%
copy from sql/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala
copy to sql/spark-3.0/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala
diff --git a/sql/spark-3.0/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala b/sql/spark-3.0/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
new file mode 100644
index 00000000..dfe6d04f
--- /dev/null
+++ b/sql/spark-3.0/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sedona.sql
+
+import org.apache.log4j.{Level, Logger}
+import org.apache.sedona.core.serde.SedonaKryoRegistrator
+import org.apache.sedona.sql.utils.SedonaSQLRegistrator
+import org.apache.spark.serializer.KryoSerializer
+import org.apache.spark.sql.{DataFrame, SparkSession}
+import org.scalatest.{BeforeAndAfterAll, FunSpec}
+
+trait TestBaseScala extends FunSpec with BeforeAndAfterAll {
+  Logger.getRootLogger().setLevel(Level.WARN)
+  Logger.getLogger("org.apache").setLevel(Level.WARN)
+  Logger.getLogger("com").setLevel(Level.WARN)
+  Logger.getLogger("akka").setLevel(Level.WARN)
+  Logger.getLogger("org.apache.sedona.core").setLevel(Level.WARN)
+
+  val warehouseLocation = System.getProperty("user.dir") + "/target/"
+  val sparkSession = SparkSession.builder()
+    .master("local[*]").appName("sedonasqlScalaTest")
+    .config("spark.serializer", classOf[KryoSerializer].getName)
+    .config("spark.kryo.registrator", classOf[SedonaKryoRegistrator].getName)
+    .config("spark.sql.warehouse.dir", warehouseLocation)
+    // We need to be explicit about broadcasting in tests.
+    .config("sedona.join.autoBroadcastJoinThreshold", "-1").getOrCreate()
+
+  val resourceFolder = System.getProperty("user.dir") + "/../../core/src/test/resources/"
+
+  override def beforeAll(): Unit = {
+    SedonaSQLRegistrator.registerAll(sparkSession)
+  }
+
+  override def afterAll(): Unit = {
+    // SedonaSQLRegistrator.dropAll(sparkSession)
+    // sparkSession.stop()
+  }
+
+  def loadCsv(path: String): DataFrame = {
+    sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(path)
+  }
+}
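A sketch of how a suite consumes this trait; the resource file name is a placeholder:

    package org.apache.sedona.sql

    class ExampleSuite extends TestBaseScala {
      describe("Sedona SQL") {
        it("loads a CSV through the shared session") {
          // resourceFolder and loadCsv come from TestBaseScala;
          // "testpoint.csv" is an illustrative resource name.
          val df = loadCsv(resourceFolder + "testpoint.csv")
          assert(df.columns.nonEmpty)
        }
      }
    }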
diff --git a/sql/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala b/sql/spark-3.0/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala
similarity index 100%
copy from sql/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala
copy to sql/spark-3.0/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala b/sql/spark-3.0/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala
similarity index 100%
copy from sql/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala
copy to sql/spark-3.0/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala
diff --git a/sql/spark-3.4/.gitignore b/sql/spark-3.4/.gitignore
new file mode 100644
index 00000000..1cc6c4a1
--- /dev/null
+++ b/sql/spark-3.4/.gitignore
@@ -0,0 +1,12 @@
+/target/
+/.settings/
+/.classpath
+/.project
+/dependency-reduced-pom.xml
+/doc/
+/.idea/
+*.iml
+/latest/
+/spark-warehouse/
+/metastore_db/
+*.log
diff --git a/sql/pom.xml b/sql/spark-3.4/pom.xml
similarity index 91%
copy from sql/pom.xml
copy to sql/spark-3.4/pom.xml
index 129a2121..531d4498 100644
--- a/sql/pom.xml
+++ b/sql/spark-3.4/pom.xml
@@ -21,14 +21,14 @@
 	<modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.apache.sedona</groupId>
-        <artifactId>sedona-parent</artifactId>
+        <artifactId>sedona-sql-parent-${spark.compat.version}_${scala.compat.version}</artifactId>
         <version>1.4.1-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 	<artifactId>sedona-sql-${spark.compat.version}_${scala.compat.version}</artifactId>
 
 	<name>${project.groupId}:${project.artifactId}</name>
-	<description>A cluster computing system for processing large-scale spatial data: SQL API.</description>
+	<description>A cluster computing system for processing large-scale spatial data: SQL API for Spark 3.4.</description>
     <url>http://sedona.apache.org/</url>
 	<packaging>jar</packaging>
 
@@ -53,17 +53,15 @@
             <artifactId>sedona-core-${spark.compat.version}_${scala.compat.version}</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.apache.sedona</groupId>
+            <artifactId>sedona-sql-common-${spark.compat.version}_${scala.compat.version}</artifactId>
+            <version>${project.version}</version>
+        </dependency>
 
         <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-core_${scala.compat.version}</artifactId>
-            <exclusions>
-                <!-- Make sure Hadoop's guava version is used for older versions of Spark tests -->
-                <exclusion>
-                    <groupId>com.google.guava</groupId>
-                    <artifactId>guava</artifactId>
-                </exclusion>
-            </exclusions>
         </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
@@ -128,8 +126,8 @@
             <artifactId>scalatest_${scala.compat.version}</artifactId>
         </dependency>
         <dependency>
-                <groupId>org.mockito</groupId>
-                <artifactId>mockito-inline</artifactId>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-inline</artifactId>
         </dependency>
     </dependencies>
 	<build>
diff --git a/sql/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister b/sql/spark-3.4/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
similarity index 100%
rename from sql/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
rename to sql/spark-3.4/src/main/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
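The rename above keeps the service registration with the Spark 3.4 module. This file is how Spark discovers the short name: format resolution loads every DataSourceRegister on the classpath and matches on shortName(). A sketch of that lookup, for illustration only:

    import java.util.ServiceLoader
    import org.apache.spark.sql.sources.DataSourceRegister
    import scala.collection.JavaConverters._

    // Spark resolves format("geoparquet") by scanning META-INF/services entries
    // for DataSourceRegister implementations; this lists the registered short names.
    val shortNames = ServiceLoader.load(classOf[DataSourceRegister]).asScala.map(_.shortName()).toSeq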
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoDataSourceUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
similarity index 94%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
index 279efbbd..d1218ec5 100644
--- a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
+++ b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetFileFormat.scala
@@ -15,7 +15,6 @@ package org.apache.spark.sql.execution.datasources.parquet
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.FileStatus
-import org.apache.hadoop.fs.Path
 import org.apache.hadoop.mapreduce._
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
 import org.apache.parquet.filter2.compat.FilterCompat
@@ -46,12 +45,10 @@ import scala.util.Failure
 import scala.util.Try
 
 class GeoParquetFileFormat(val spatialFilter: Option[GeoParquetSpatialFilter])
-  extends ParquetFileFormat with FileFormat with DataSourceRegister with Logging with Serializable {
+  extends ParquetFileFormat with GeoParquetFileFormatBase with FileFormat with DataSourceRegister with Logging with Serializable {
 
   def this() = this(None)
 
-  override def shortName(): String = "geoparquet"
-
   override def equals(other: Any): Boolean = other.isInstanceOf[GeoParquetFileFormat] &&
     other.asInstanceOf[GeoParquetFileFormat].spatialFilter == spatialFilter
 
@@ -211,14 +208,17 @@ class GeoParquetFileFormat(val spatialFilter: Option[GeoParquetSpatialFilter])
     val pushDownDate = sqlConf.parquetFilterPushDownDate
     val pushDownTimestamp = sqlConf.parquetFilterPushDownTimestamp
     val pushDownDecimal = sqlConf.parquetFilterPushDownDecimal
-    val pushDownStringStartWith = sqlConf.parquetFilterPushDownStringStartWith
+    val parquetFilterPushDownStringPredicate = sqlConf.parquetFilterPushDownStringPredicate // replaces parquetFilterPushDownStringStartWith
     val pushDownInFilterThreshold = sqlConf.parquetFilterPushDownInFilterThreshold
     val isCaseSensitive = sqlConf.caseSensitiveAnalysis
 
+    val parquetOptions = new ParquetOptions(options, sparkSession.sessionState.conf)
+    val datetimeRebaseModeInRead = parquetOptions.datetimeRebaseModeInRead
+
     (file: PartitionedFile) => {
       assert(file.partitionValues.numFields == partitionSchema.size)
 
-      val filePath = new Path(new URI(file.filePath))
+      val filePath = file.toPath
       val split =
         new org.apache.parquet.hadoop.ParquetInputSplit(
           filePath,
@@ -232,11 +232,14 @@ class GeoParquetFileFormat(val spatialFilter: Option[GeoParquetSpatialFilter])
 
       val footerFileMetaData =
         ParquetFileReader.readFooter(sharedConf, filePath, SKIP_ROW_GROUPS).getFileMetaData
+      val datetimeRebaseSpec = DataSourceUtils.datetimeRebaseSpec(
+        footerFileMetaData.getKeyValueMetaData.get,
+        datetimeRebaseModeInRead)
       // Try to push down filters when filter push-down is enabled.
       val pushed = if (enableParquetFilterPushDown) {
         val parquetSchema = footerFileMetaData.getSchema
-        val parquetFilters = new GeoParquetFilters(parquetSchema, pushDownDate, pushDownTimestamp,
-          pushDownDecimal, pushDownStringStartWith, pushDownInFilterThreshold, isCaseSensitive)
+        val parquetFilters = new ParquetFilters(parquetSchema, pushDownDate, pushDownTimestamp,
+          pushDownDecimal, parquetFilterPushDownStringPredicate, pushDownInFilterThreshold, isCaseSensitive, datetimeRebaseSpec)
         filters
           // Collects all converted Parquet filter predicates. Notice that not all predicates can be
           // converted (`ParquetFilters.createFilter` returns an `Option`). That's why a `flatMap`
@@ -362,7 +365,7 @@ object GeoParquetFileFormat extends Logging {
         }
     }
 
-    GeoSchemaMergeUtils.mergeSchemasInParallel(sparkSession, parameters, filesToTouch, reader)
+    SchemaMergeUtils.mergeSchemasInParallel(sparkSession, parameters, filesToTouch, reader)
   }
 
   private def readSchemaFromFooter(footer: Footer, converter: GeoParquetToSparkSchemaConverter): StructType = {
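The filePath hunk above tracks a Spark 3.4 API change: PartitionedFile.filePath is no longer a plain String, and the new toPath helper returns a Hadoop Path directly. A minimal sketch of the adaptation (treat the exact signatures as Spark 3.4 assumptions):

    import org.apache.hadoop.fs.Path
    import org.apache.spark.sql.execution.datasources.PartitionedFile

    // Spark <= 3.3: new Path(new URI(file.filePath)) round-tripped through a String and URI.
    // Spark 3.4: file.toPath yields the Hadoop Path directly.
    def hadoopPathOf(file: PartitionedFile): Path = file.toPath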
diff --git a/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetReadSupport.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetReadSupport.scala
new file mode 100644
index 00000000..81125f95
--- /dev/null
+++ b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetReadSupport.scala
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources.parquet
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.parquet.hadoop.api.ReadSupport.ReadContext
+import org.apache.parquet.hadoop.api.{InitContext, ReadSupport}
+import org.apache.parquet.io.api.RecordMaterializer
+import org.apache.parquet.schema.Type.Repetition
+import org.apache.parquet.schema._
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
+import org.apache.spark.sql.sedona_sql.UDT.GeometryUDT
+import org.apache.spark.sql.types._
+
+import java.time.ZoneId
+import java.util.{Locale, Map => JMap}
+import scala.collection.JavaConverters._
+
+/**
+ * A Parquet [[ReadSupport]] implementation for reading Parquet records as Catalyst
+ * [[InternalRow]]s.
+ *
+ * The API interface of [[ReadSupport]] is a little over-complicated for historical
+ * reasons.  In older versions of parquet-mr (say 1.6.0rc3 and prior), [[ReadSupport]] needed to be
+ * instantiated and initialized twice, on both the driver side and the executor side.  The
+ * [[init()]] method is for driver-side initialization, while [[prepareForRead()]] is for the
+ * executor side.  Starting from parquet-mr 1.6.0 this is no longer the case: [[ReadSupport]] is
+ * only instantiated and initialized on the executor side, so in principle the two methods could be
+ * combined into a single initialization method.  The only reason to keep both is parquet-mr API
+ * backwards-compatibility.
+ *
+ * For this reason, we no longer rely on [[ReadContext]] to pass the requested schema from [[init()]]
+ * to [[prepareForRead()]], but use a private `var` for simplicity.
+ */
+class GeoParquetReadSupport (override val convertTz: Option[ZoneId],
+                             enableVectorizedReader: Boolean,
+                             datetimeRebaseMode: LegacyBehaviorPolicy.Value,
+                             int96RebaseMode: LegacyBehaviorPolicy.Value)
+  extends ParquetReadSupport with Logging {
+  private var catalystRequestedSchema: StructType = _
+
+  /**
+   * Called on executor side before [[prepareForRead()]] and instantiating actual Parquet record
+   * readers.  Responsible for figuring out Parquet requested schema used for column pruning.
+   */
+  override def init(context: InitContext): ReadContext = {
+    val conf = context.getConfiguration
+    catalystRequestedSchema = {
+      val schemaString = conf.get(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA)
+      assert(schemaString != null, "Parquet requested schema not set.")
+      StructType.fromString(schemaString)
+    }
+
+    val caseSensitive = conf.getBoolean(SQLConf.CASE_SENSITIVE.key,
+      SQLConf.CASE_SENSITIVE.defaultValue.get)
+    val schemaPruningEnabled = conf.getBoolean(SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key,
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.defaultValue.get)
+    val useFieldId = conf.getBoolean(SQLConf.PARQUET_FIELD_ID_READ_ENABLED.key,
+      SQLConf.PARQUET_FIELD_ID_READ_ENABLED.defaultValue.get)
+    val parquetFileSchema = context.getFileSchema
+    val parquetClippedSchema = ParquetReadSupport.clipParquetSchema(parquetFileSchema,
+      catalystRequestedSchema, caseSensitive, useFieldId)
+
+    // We pass two schemas to GeoParquetRecordMaterializer:
+    // - parquetRequestedSchema: the schema of the file data we want to read
+    // - catalystRequestedSchema: the schema of the rows we want to return
+    // The reader is responsible for reconciling the differences between the two.
+    val parquetRequestedSchema = if (schemaPruningEnabled && !enableVectorizedReader) {
+      // Parquet-MR reader requires that parquetRequestedSchema include only those fields present
+      // in the underlying parquetFileSchema. Therefore, we intersect the parquetClippedSchema
+      // with the parquetFileSchema
+      GeoParquetReadSupport.intersectParquetGroups(parquetClippedSchema, parquetFileSchema)
+        .map(groupType => new MessageType(groupType.getName, groupType.getFields))
+        .getOrElse(ParquetSchemaConverter.EMPTY_MESSAGE)
+    } else {
+      // Spark's vectorized reader currently only supports atomic types. It also skips fields
+      // in parquetRequestedSchema which are not present in the file.
+      parquetClippedSchema
+    }
+    logDebug(
+      s"""Going to read the following fields from the Parquet file with the following schema:
+         |Parquet file schema:
+         |$parquetFileSchema
+         |Parquet clipped schema:
+         |$parquetClippedSchema
+         |Parquet requested schema:
+         |$parquetRequestedSchema
+         |Catalyst requested schema:
+         |${catalystRequestedSchema.treeString}
+       """.stripMargin)
+    new ReadContext(parquetRequestedSchema, Map.empty[String, String].asJava)
+  }
+  /**
+   * Called on executor side after [[init()]], before instantiating actual Parquet record readers.
+   * Responsible for instantiating [[RecordMaterializer]], which is used for converting Parquet
+   * records to Catalyst [[InternalRow]]s.
+   */
+  override def prepareForRead(
+                               conf: Configuration,
+                               keyValueMetaData: JMap[String, String],
+                               fileSchema: MessageType,
+                               readContext: ReadContext): RecordMaterializer[InternalRow] = {
+    val parquetRequestedSchema = readContext.getRequestedSchema
+    new GeoParquetRecordMaterializer(
+      parquetRequestedSchema,
+      GeoParquetReadSupport.expandUDT(catalystRequestedSchema),
+      new GeoParquetToSparkSchemaConverter(keyValueMetaData, conf),
+      convertTz,
+      datetimeRebaseMode,
+      int96RebaseMode)
+  }
+}
+
+object GeoParquetReadSupport extends Logging {
+
+  /**
+   * Computes the structural intersection between two Parquet group types.
+   * This is used to create a requestedSchema for the ReadContext of the Parquet-MR reader.
+   * The Parquet-MR reader does not support nested field access to non-existent fields, while the
+   * parquet library does support reading non-existent fields via regular field access.
+   */
+  private def intersectParquetGroups(
+                                      groupType1: GroupType, groupType2: GroupType): Option[GroupType] = {
+    val fields =
+      groupType1.getFields.asScala
+        .filter(field => groupType2.containsField(field.getName))
+        .flatMap {
+          case field1: GroupType =>
+            val field2 = groupType2.getType(field1.getName)
+            if (field2.isPrimitive) {
+              None
+            } else {
+              intersectParquetGroups(field1, field2.asGroupType)
+            }
+          case field1 => Some(field1)
+        }
+
+    if (fields.nonEmpty) {
+      Some(groupType1.withNewFields(fields.asJava))
+    } else {
+      None
+    }
+  }
+
+  def expandUDT(schema: StructType): StructType = {
+    def expand(dataType: DataType): DataType = {
+      dataType match {
+        case t: ArrayType =>
+          t.copy(elementType = expand(t.elementType))
+
+        case t: MapType =>
+          t.copy(
+            keyType = expand(t.keyType),
+            valueType = expand(t.valueType))
+
+        case t: StructType =>
+          val expandedFields = t.fields.map(f => f.copy(dataType = expand(f.dataType)))
+          t.copy(fields = expandedFields)
+
+        // Don't expand GeometryUDT types. We'll treat geometry columns specially in
+        // GeoParquetRowConverter
+        case t: GeometryUDT => t
+
+        case t: UserDefinedType[_] =>
+          t.sqlType
+
+        case t =>
+          t
+      }
+    }
+
+    expand(schema).asInstanceOf[StructType]
+  }
+}
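A short sketch of the expandUDT behavior defined above, with illustrative column names; direct instantiation of GeometryUDT is assumed to be visible from the caller:

    import org.apache.spark.sql.sedona_sql.UDT.GeometryUDT
    import org.apache.spark.sql.types._

    val schema = StructType(Seq(
      StructField("id", LongType),
      StructField("geom", new GeometryUDT)))

    // Other UDT fields collapse to their sqlType; GeometryUDT survives untouched
    // so GeoParquetRowConverter can build the geometry values itself.
    val expanded = GeoParquetReadSupport.expandUDT(schema)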
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRecordMaterializer.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
similarity index 99%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
index 2d12073c..11ea325c 100644
--- a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
+++ b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetRowConverter.scala
@@ -262,7 +262,7 @@ private[parquet] class GeoParquetRowConverter(
       case TimestampType if parquetType.getOriginalType == OriginalType.TIMESTAMP_MILLIS =>
         new ParquetPrimitiveConverter(updater) {
           override def addLong(value: Long): Unit = {
-            val micros = GeoDateTimeUtils.millisToMicros(value)
+            val micros = DateTimeUtils.millisToMicros(value)
             updater.setLong(timestampRebaseFunc(micros))
           }
         }
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetSchemaConverter.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala
similarity index 100%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetUtils.scala
diff --git a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
similarity index 99%
rename from sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
index 30f04f5d..c95fd26c 100644
--- a/sql/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
+++ b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/GeoParquetWriteSupport.scala
@@ -258,7 +258,7 @@ class GeoParquetWriteSupport extends WriteSupport[InternalRow] with Logging {
           case SQLConf.ParquetOutputTimestampType.TIMESTAMP_MILLIS =>
             (row: SpecializedGetters, ordinal: Int) =>
               val micros = row.getLong(ordinal)
-              val millis = GeoDateTimeUtils.microsToMillis(timestampRebaseFunc(micros))
+              val millis = DateTimeUtils.microsToMillis(timestampRebaseFunc(micros))
               recordConsumer.addLong(millis)
         }
 
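Both timestamp hunks drop the GeoDateTimeUtils shim in favor of Spark's own DateTimeUtils, which provides the same conversions; a quick sketch of the unit math with illustrative values:

    import org.apache.spark.sql.catalyst.util.DateTimeUtils

    // microsToMillis floors toward negative infinity; millisToMicros multiplies by 1000.
    val millis = DateTimeUtils.microsToMillis(1234567L) // 1234
    val micros = DateTimeUtils.millisToMicros(1234L)    // 1234000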
diff --git a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
similarity index 98%
rename from sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
rename to sql/spark-3.4/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
index 842e28f3..7a1a99ed 100644
--- a/sql/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
+++ b/sql/spark-3.4/src/main/scala/org/apache/spark/sql/sedona_sql/io/GeotiffFileFormat.scala
@@ -101,8 +101,7 @@ private[spark] class GeotiffFileFormat extends FileFormat with DataSourceRegiste
       if (!imageSourceOptions.dropInvalid && requiredSchema.isEmpty) {
         Iterator(emptyUnsafeRow)
       } else {
-        val origin = file.filePath
-        val path = new Path(origin)
+        val path = file.toPath
         val fs = path.getFileSystem(broadcastedHadoopConf.value.value)
         val stream = fs.open(path)
         val bytes = try {
@@ -111,11 +110,11 @@ private[spark] class GeotiffFileFormat extends FileFormat with DataSourceRegiste
           Closeables.close(stream, true)
         }
 
-        val resultOpt = GeotiffSchema.decode(origin, bytes, imageSourceOptions)
+        val resultOpt = GeotiffSchema.decode(path.toString, bytes, imageSourceOptions)
         val filteredResult = if (imageSourceOptions.dropInvalid) {
           resultOpt.toIterator
         } else {
-          Iterator(resultOpt.getOrElse(GeotiffSchema.invalidImageRow(origin)))
+          Iterator(resultOpt.getOrElse(GeotiffSchema.invalidImageRow(path.toString)))
         }
 
         if (requiredSchema.isEmpty) {
diff --git a/sql/src/test/resources/log4j2.properties b/sql/spark-3.4/src/test/resources/log4j2.properties
similarity index 100%
rename from sql/src/test/resources/log4j2.properties
rename to sql/spark-3.4/src/test/resources/log4j2.properties
diff --git a/sql/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala b/sql/spark-3.4/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala
rename to sql/spark-3.4/src/test/scala/org/apache/sedona/sql/GeoParquetSpatialFilterPushDownSuite.scala
diff --git a/sql/spark-3.4/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala b/sql/spark-3.4/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
new file mode 100644
index 00000000..dfe6d04f
--- /dev/null
+++ b/sql/spark-3.4/src/test/scala/org/apache/sedona/sql/TestBaseScala.scala
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sedona.sql
+
+import org.apache.log4j.{Level, Logger}
+import org.apache.sedona.core.serde.SedonaKryoRegistrator
+import org.apache.sedona.sql.utils.SedonaSQLRegistrator
+import org.apache.spark.serializer.KryoSerializer
+import org.apache.spark.sql.{DataFrame, SparkSession}
+import org.scalatest.{BeforeAndAfterAll, FunSpec}
+
+trait TestBaseScala extends FunSpec with BeforeAndAfterAll {
+  Logger.getRootLogger().setLevel(Level.WARN)
+  Logger.getLogger("org.apache").setLevel(Level.WARN)
+  Logger.getLogger("com").setLevel(Level.WARN)
+  Logger.getLogger("akka").setLevel(Level.WARN)
+  Logger.getLogger("org.apache.sedona.core").setLevel(Level.WARN)
+
+  val warehouseLocation = System.getProperty("user.dir") + "/target/"
+  val sparkSession = SparkSession.builder()
+    .master("local[*]").appName("sedonasqlScalaTest")
+    .config("spark.serializer", classOf[KryoSerializer].getName)
+    .config("spark.kryo.registrator", classOf[SedonaKryoRegistrator].getName)
+    .config("spark.sql.warehouse.dir", warehouseLocation)
+    // We need to be explicit about broadcasting in tests.
+    .config("sedona.join.autoBroadcastJoinThreshold", "-1").getOrCreate()
+
+  val resourceFolder = System.getProperty("user.dir") + "/../../core/src/test/resources/"
+
+  override def beforeAll(): Unit = {
+    SedonaSQLRegistrator.registerAll(sparkSession)
+  }
+
+  override def afterAll(): Unit = {
+    // SedonaSQLRegistrator.dropAll(sparkSession)
+    // sparkSession.stop()
+  }
+
+  def loadCsv(path: String): DataFrame = {
+    sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(path)
+  }
+}
diff --git a/sql/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala b/sql/spark-3.4/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala
rename to sql/spark-3.4/src/test/scala/org/apache/sedona/sql/geoparquetIOTests.scala
diff --git a/sql/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala b/sql/spark-3.4/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala
similarity index 100%
rename from sql/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala
rename to sql/spark-3.4/src/test/scala/org/apache/sedona/sql/rasterIOTest.scala
diff --git a/viz/pom.xml b/viz/pom.xml
index 0c588231..69eb062e 100644
--- a/viz/pom.xml
+++ b/viz/pom.xml
@@ -61,17 +61,35 @@
          <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-core_${scala.compat.version}</artifactId>
+            <version>${spark.version}</version>
+            <scope>provided</scope>
             <exclusions>
                 <!-- Make sure Hadoop's guava version is used for older versions of Spark tests -->
                 <exclusion>
                     <groupId>com.google.guava</groupId>
                     <artifactId>guava</artifactId>
                 </exclusion>
+                <!-- Exclude log4j 1 for older versions of Spark -->
+                <exclusion>
+                    <groupId>log4j</groupId>
+                    <artifactId>log4j</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+                <!-- Exclude log4j-slf4j2-impl for Spark 3.4 -->
+                <exclusion>
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-slf4j2-impl</artifactId>
+                </exclusion>
             </exclusions>
         </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-sql_${scala.compat.version}</artifactId>
+            <version>${spark.version}</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.hadoop</groupId>
@@ -100,10 +118,12 @@
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
+            <version>${scala.version}</version>
         </dependency>
         <dependency>
             <groupId>org.scala-lang.modules</groupId>
             <artifactId>scala-collection-compat_${scala.compat.version}</artifactId>
+            <version>${scala-collection-compat.version}</version>
         </dependency>
         <dependency>
             <groupId>org.beryx</groupId>
@@ -125,6 +145,8 @@
         <dependency>
             <groupId>org.scalatest</groupId>
             <artifactId>scalatest_${scala.compat.version}</artifactId>
+            <version>${scalatest.version}</version>
+            <scope>test</scope>
         </dependency>
 	</dependencies>
 	<build>