Posted to commits@iceberg.apache.org by bl...@apache.org on 2022/08/14 17:31:20 UTC

[iceberg] branch 1.0.x updated: Build: Enforce & apply spotless to 1.0.x branch (#5374)

This is an automated email from the ASF dual-hosted git repository.

blue pushed a commit to branch 1.0.x
in repository https://gitbox.apache.org/repos/asf/iceberg.git


The following commit(s) were added to refs/heads/1.0.x by this push:
     new 0da95d1599 Build: Enforce & apply spotless to 1.0.x branch (#5374)
0da95d1599 is described below

commit 0da95d1599cebc0835e21700c0a95633b4739e8b
Author: Eduard Tudenhöfner <et...@gmail.com>
AuthorDate: Sun Aug 14 19:31:14 2022 +0200

    Build: Enforce & apply spotless to 1.0.x branch (#5374)
---
 .baseline/checkstyle/checkstyle.xml                |   59 -
 .../copyright/copyright-header-java.txt            |    6 -
 README.md                                          |    3 +-
 .../iceberg/aliyun/AliyunClientFactories.java      |   45 +-
 .../apache/iceberg/aliyun/AliyunClientFactory.java |    5 +-
 .../apache/iceberg/aliyun/AliyunProperties.java    |   40 +-
 .../org/apache/iceberg/aliyun/oss/BaseOSSFile.java |    9 +-
 .../org/apache/iceberg/aliyun/oss/OSSFileIO.java   |   33 +-
 .../apache/iceberg/aliyun/oss/OSSInputFile.java    |   12 +-
 .../apache/iceberg/aliyun/oss/OSSInputStream.java  |   12 +-
 .../apache/iceberg/aliyun/oss/OSSOutputFile.java   |    7 +-
 .../apache/iceberg/aliyun/oss/OSSOutputStream.java |   10 +-
 .../java/org/apache/iceberg/aliyun/oss/OSSURI.java |   57 +-
 .../iceberg/aliyun/TestAliyunClientFactories.java  |   26 +-
 .../org/apache/iceberg/aliyun/TestUtility.java     |   22 +-
 .../iceberg/aliyun/oss/AliyunOSSTestBase.java      |    4 +-
 .../iceberg/aliyun/oss/AliyunOSSTestRule.java      |   38 +-
 .../iceberg/aliyun/oss/OSSIntegrationTestRule.java |   13 +-
 .../apache/iceberg/aliyun/oss/TestOSSFileIO.java   |   35 +-
 .../iceberg/aliyun/oss/TestOSSInputFile.java       |   34 +-
 .../iceberg/aliyun/oss/TestOSSInputStream.java     |   28 +-
 .../iceberg/aliyun/oss/TestOSSOutputFile.java      |   29 +-
 .../iceberg/aliyun/oss/TestOSSOutputStream.java    |   51 +-
 .../org/apache/iceberg/aliyun/oss/TestOSSURI.java  |   34 +-
 .../iceberg/aliyun/oss/mock/AliyunOSSMockApp.java  |   18 +-
 .../oss/mock/AliyunOSSMockLocalController.java     |   63 +-
 .../aliyun/oss/mock/AliyunOSSMockLocalStore.java   |   25 +-
 .../iceberg/aliyun/oss/mock/AliyunOSSMockRule.java |   28 +-
 .../iceberg/aliyun/oss/mock/ObjectMetadata.java    |    4 +-
 .../org/apache/iceberg/aliyun/oss/mock/Range.java  |    1 -
 .../aliyun/oss/mock/TestLocalAliyunOSS.java        |   13 +-
 api/src/main/java/org/apache/iceberg/Accessor.java |    1 -
 .../main/java/org/apache/iceberg/Accessors.java    |   23 +-
 .../java/org/apache/iceberg/AddedRowsScanTask.java |   28 +-
 .../main/java/org/apache/iceberg/AppendFiles.java  |   31 +-
 .../java/org/apache/iceberg/BaseScanTaskGroup.java |    8 +-
 .../org/apache/iceberg/ChangelogOperation.java     |    8 +-
 .../java/org/apache/iceberg/ChangelogScanTask.java |   16 +-
 .../java/org/apache/iceberg/CombinedScanTask.java  |    6 +-
 .../main/java/org/apache/iceberg/ContentFile.java  |   80 +-
 .../java/org/apache/iceberg/ContentScanTask.java   |    7 +-
 api/src/main/java/org/apache/iceberg/DataFile.java |  106 +-
 .../java/org/apache/iceberg/DataOperations.java    |   24 +-
 api/src/main/java/org/apache/iceberg/DataTask.java |    9 +-
 .../main/java/org/apache/iceberg/DeleteFile.java   |   11 +-
 .../main/java/org/apache/iceberg/DeleteFiles.java  |   29 +-
 .../apache/iceberg/DeletedDataFileScanTask.java    |   28 +-
 .../org/apache/iceberg/DeletedRowsScanTask.java    |   33 +-
 .../java/org/apache/iceberg/DistributionMode.java  |   29 +-
 .../java/org/apache/iceberg/ExpireSnapshots.java   |   71 +-
 .../main/java/org/apache/iceberg/FileContent.java  |    5 +-
 .../main/java/org/apache/iceberg/FileFormat.java   |    9 +-
 .../main/java/org/apache/iceberg/FileScanTask.java |    5 +-
 api/src/main/java/org/apache/iceberg/Files.java    |    7 +-
 .../main/java/org/apache/iceberg/HistoryEntry.java |   15 +-
 .../main/java/org/apache/iceberg/IcebergBuild.java |   19 +-
 .../org/apache/iceberg/IncrementalAppendScan.java  |   10 +-
 .../apache/iceberg/IncrementalChangelogScan.java   |    9 +-
 .../java/org/apache/iceberg/IncrementalScan.java   |   25 +-
 .../main/java/org/apache/iceberg/LockManager.java  |    9 +-
 .../java/org/apache/iceberg/ManageSnapshots.java   |   57 +-
 .../java/org/apache/iceberg/ManifestContent.java   |   11 +-
 .../main/java/org/apache/iceberg/ManifestFile.java |  204 +-
 .../java/org/apache/iceberg/MergeableScanTask.java |    5 +-
 api/src/main/java/org/apache/iceberg/Metrics.java  |   48 +-
 .../main/java/org/apache/iceberg/NullOrder.java    |    4 +-
 .../java/org/apache/iceberg/OverwriteFiles.java    |  135 +-
 .../java/org/apache/iceberg/PartitionField.java    |   29 +-
 .../main/java/org/apache/iceberg/PartitionKey.java |    9 +-
 .../java/org/apache/iceberg/PartitionSpec.java     |  172 +-
 .../java/org/apache/iceberg/PendingUpdate.java     |   18 +-
 .../java/org/apache/iceberg/ReplacePartitions.java |   69 +-
 .../java/org/apache/iceberg/ReplaceSortOrder.java  |   17 +-
 .../main/java/org/apache/iceberg/RewriteFiles.java |   44 +-
 .../java/org/apache/iceberg/RewriteJobOrder.java   |   30 +-
 .../java/org/apache/iceberg/RewriteManifests.java  |   47 +-
 api/src/main/java/org/apache/iceberg/Rollback.java |   11 +-
 api/src/main/java/org/apache/iceberg/RowDelta.java |  126 +-
 api/src/main/java/org/apache/iceberg/Scan.java     |   68 +-
 api/src/main/java/org/apache/iceberg/ScanTask.java |   13 +-
 .../java/org/apache/iceberg/ScanTaskGroup.java     |    5 +-
 api/src/main/java/org/apache/iceberg/Schema.java   |  166 +-
 api/src/main/java/org/apache/iceberg/Snapshot.java |   65 +-
 .../main/java/org/apache/iceberg/SnapshotRef.java  |   37 +-
 .../java/org/apache/iceberg/SnapshotRefType.java   |    5 +-
 .../java/org/apache/iceberg/SnapshotUpdate.java    |    1 -
 .../java/org/apache/iceberg/SortDirection.java     |    4 +-
 .../main/java/org/apache/iceberg/SortField.java    |   29 +-
 .../main/java/org/apache/iceberg/SortOrder.java    |   57 +-
 .../java/org/apache/iceberg/SortOrderBuilder.java  |    5 +-
 .../org/apache/iceberg/SplittableScanTask.java     |   11 +-
 .../main/java/org/apache/iceberg/StructLike.java   |    5 +-
 api/src/main/java/org/apache/iceberg/Table.java    |   67 +-
 .../main/java/org/apache/iceberg/TableScan.java    |   31 +-
 api/src/main/java/org/apache/iceberg/Tables.java   |   22 +-
 .../main/java/org/apache/iceberg/Transaction.java  |   32 +-
 .../org/apache/iceberg/UnboundPartitionSpec.java   |    4 +-
 .../java/org/apache/iceberg/UnboundSortOrder.java  |   23 +-
 .../java/org/apache/iceberg/UpdateLocation.java    |    5 +-
 .../org/apache/iceberg/UpdatePartitionSpec.java    |   74 +-
 .../java/org/apache/iceberg/UpdateProperties.java  |   10 +-
 .../main/java/org/apache/iceberg/UpdateSchema.java |  280 +--
 .../java/org/apache/iceberg/actions/Action.java    |   15 +-
 .../apache/iceberg/actions/ActionsProvider.java    |   54 +-
 .../actions/ConvertEqualityDeleteFiles.java        |   25 +-
 .../apache/iceberg/actions/DeleteOrphanFiles.java  |   52 +-
 .../iceberg/actions/DeleteReachableFiles.java      |   46 +-
 .../apache/iceberg/actions/ExpireSnapshots.java    |   79 +-
 .../org/apache/iceberg/actions/MigrateTable.java   |   21 +-
 .../apache/iceberg/actions/RewriteDataFiles.java   |  146 +-
 .../apache/iceberg/actions/RewriteManifests.java   |   32 +-
 .../actions/RewritePositionDeleteFiles.java        |   24 +-
 .../org/apache/iceberg/actions/SnapshotTable.java  |   23 +-
 .../org/apache/iceberg/actions/SnapshotUpdate.java |    5 +-
 .../java/org/apache/iceberg/catalog/Catalog.java   |   84 +-
 .../java/org/apache/iceberg/catalog/Namespace.java |   11 +-
 .../org/apache/iceberg/catalog/SessionCatalog.java |   63 +-
 .../apache/iceberg/catalog/SupportsNamespaces.java |   43 +-
 .../apache/iceberg/catalog/TableIdentifier.java    |   28 +-
 .../main/java/org/apache/iceberg/data/Record.java  |    5 +-
 .../iceberg/encryption/EncryptedInputFile.java     |   12 +-
 .../iceberg/encryption/EncryptedOutputFile.java    |   18 +-
 .../iceberg/encryption/EncryptionKeyMetadata.java  |   34 +-
 .../iceberg/encryption/EncryptionManager.java      |   23 +-
 .../org/apache/iceberg/encryption/KmsClient.java   |   33 +-
 .../iceberg/events/IncrementalScanEvent.java       |   14 +-
 .../java/org/apache/iceberg/events/Listener.java   |    5 +-
 .../java/org/apache/iceberg/events/Listeners.java  |   11 +-
 .../java/org/apache/iceberg/events/ScanEvent.java  |    5 +-
 .../iceberg/exceptions/AlreadyExistsException.java |    5 +-
 .../iceberg/exceptions/BadRequestException.java    |    5 +-
 .../CherrypickAncestorCommitException.java         |    9 +-
 .../iceberg/exceptions/CommitFailedException.java  |    5 +-
 .../exceptions/CommitStateUnknownException.java    |   17 +-
 .../exceptions/DuplicateWAPCommitException.java    |    5 +-
 .../iceberg/exceptions/ForbiddenException.java     |    5 +-
 .../exceptions/NamespaceNotEmptyException.java     |    5 +-
 .../exceptions/NoSuchIcebergTableException.java    |    5 +-
 .../exceptions/NoSuchNamespaceException.java       |    5 +-
 .../iceberg/exceptions/NoSuchTableException.java   |    5 +-
 .../iceberg/exceptions/NotAuthorizedException.java |    5 +-
 .../iceberg/exceptions/NotFoundException.java      |    5 +-
 .../apache/iceberg/exceptions/RESTException.java   |    5 +-
 .../iceberg/exceptions/RuntimeIOException.java     |    6 +-
 .../exceptions/ServiceFailureException.java        |    5 +-
 .../exceptions/UnprocessableEntityException.java   |    6 +-
 .../iceberg/exceptions/ValidationException.java    |   15 +-
 .../java/org/apache/iceberg/expressions/And.java   |    5 +-
 .../org/apache/iceberg/expressions/Binder.java     |   58 +-
 .../java/org/apache/iceberg/expressions/Bound.java |    5 +-
 .../iceberg/expressions/BoundLiteralPredicate.java |   16 +-
 .../apache/iceberg/expressions/BoundPredicate.java |    4 +-
 .../apache/iceberg/expressions/BoundReference.java |    7 +-
 .../iceberg/expressions/BoundSetPredicate.java     |   10 +-
 .../org/apache/iceberg/expressions/BoundTerm.java  |   10 +-
 .../apache/iceberg/expressions/BoundTransform.java |    1 -
 .../iceberg/expressions/BoundUnaryPredicate.java   |    1 -
 .../org/apache/iceberg/expressions/Evaluator.java  |    9 +-
 .../org/apache/iceberg/expressions/Expression.java |   33 +-
 .../apache/iceberg/expressions/ExpressionUtil.java |   89 +-
 .../iceberg/expressions/ExpressionVisitors.java    |   78 +-
 .../apache/iceberg/expressions/Expressions.java    |   49 +-
 .../java/org/apache/iceberg/expressions/False.java |    8 +-
 .../expressions/InclusiveMetricsEvaluator.java     |   88 +-
 .../org/apache/iceberg/expressions/Literal.java    |   32 +-
 .../org/apache/iceberg/expressions/Literals.java   |   62 +-
 .../iceberg/expressions/ManifestEvaluator.java     |   63 +-
 .../apache/iceberg/expressions/NamedReference.java |   10 +-
 .../java/org/apache/iceberg/expressions/Not.java   |    1 -
 .../java/org/apache/iceberg/expressions/Or.java    |    5 +-
 .../org/apache/iceberg/expressions/Predicate.java  |    2 -
 .../apache/iceberg/expressions/Projections.java    |   84 +-
 .../org/apache/iceberg/expressions/Reference.java  |    5 +-
 .../iceberg/expressions/ResidualEvaluator.java     |   48 +-
 .../org/apache/iceberg/expressions/RewriteNot.java |    4 +-
 .../iceberg/expressions/SerializationProxies.java  |   30 +-
 .../expressions/StrictMetricsEvaluator.java        |  101 +-
 .../java/org/apache/iceberg/expressions/Term.java  |    8 +-
 .../java/org/apache/iceberg/expressions/True.java  |    8 +-
 .../org/apache/iceberg/expressions/Unbound.java    |    9 +-
 .../iceberg/expressions/UnboundPredicate.java      |   54 +-
 .../apache/iceberg/expressions/UnboundTerm.java    |    4 +-
 .../iceberg/expressions/UnboundTransform.java      |   15 +-
 .../iceberg/io/BulkDeletionFailureException.java   |    1 -
 .../java/org/apache/iceberg/io/CloseableGroup.java |   32 +-
 .../org/apache/iceberg/io/CloseableIterable.java   |   23 +-
 .../org/apache/iceberg/io/CloseableIterator.java   |    4 +-
 .../org/apache/iceberg/io/ClosingIterator.java     |    5 +-
 .../org/apache/iceberg/io/CredentialSupplier.java  |   14 +-
 .../apache/iceberg/io/DelegatingInputStream.java   |    1 -
 .../apache/iceberg/io/DelegatingOutputStream.java  |    1 -
 .../java/org/apache/iceberg/io/FileAppender.java   |   17 +-
 .../main/java/org/apache/iceberg/io/FileIO.java    |   53 +-
 .../apache/iceberg/io/FileIOMetricsContext.java    |    5 +-
 .../main/java/org/apache/iceberg/io/FileInfo.java  |    1 -
 .../java/org/apache/iceberg/io/FilterIterator.java |    1 -
 .../main/java/org/apache/iceberg/io/InputFile.java |    5 +-
 .../org/apache/iceberg/io/LocationProvider.java    |    5 +-
 .../java/org/apache/iceberg/io/OutputFile.java     |   16 +-
 .../apache/iceberg/io/PositionOutputStream.java    |    2 -
 .../java/org/apache/iceberg/io/RangeReadable.java  |   31 +-
 .../org/apache/iceberg/io/SeekableInputStream.java |    3 +-
 .../apache/iceberg/io/SupportsBulkOperations.java  |    1 -
 .../iceberg/io/SupportsPrefixOperations.java       |   20 +-
 .../org/apache/iceberg/metrics/MetricsContext.java |   23 +-
 .../java/org/apache/iceberg/transforms/Bucket.java |   44 +-
 .../java/org/apache/iceberg/transforms/Dates.java  |   26 +-
 .../org/apache/iceberg/transforms/Identity.java    |    4 +-
 .../iceberg/transforms/PartitionSpecVisitor.java   |    4 +-
 .../apache/iceberg/transforms/ProjectionUtil.java  |  113 +-
 .../iceberg/transforms/SerializationProxies.java   |   15 +-
 .../iceberg/transforms/SortOrderVisitor.java       |   49 +-
 .../org/apache/iceberg/transforms/Timestamps.java  |   43 +-
 .../org/apache/iceberg/transforms/Transform.java   |   41 +-
 .../apache/iceberg/transforms/TransformUtil.java   |   14 +-
 .../org/apache/iceberg/transforms/Transforms.java  |   23 +-
 .../org/apache/iceberg/transforms/Truncate.java    |   43 +-
 .../iceberg/transforms/UnknownTransform.java       |   10 +-
 .../apache/iceberg/transforms/VoidTransform.java   |    4 +-
 .../org/apache/iceberg/types/AssignFreshIds.java   |    1 -
 .../apache/iceberg/types/CheckCompatibility.java   |   46 +-
 .../java/org/apache/iceberg/types/Comparators.java |   97 +-
 .../java/org/apache/iceberg/types/Conversions.java |    7 +-
 .../org/apache/iceberg/types/FindTypeVisitor.java  |    1 -
 .../org/apache/iceberg/types/GetProjectedIds.java  |    1 -
 .../java/org/apache/iceberg/types/IndexById.java   |   11 +-
 .../java/org/apache/iceberg/types/IndexByName.java |   38 +-
 .../org/apache/iceberg/types/IndexParents.java     |    7 +-
 .../java/org/apache/iceberg/types/JavaHash.java    |    1 -
 .../java/org/apache/iceberg/types/JavaHashes.java  |   22 +-
 .../org/apache/iceberg/types/PrimitiveHolder.java  |   12 +-
 .../org/apache/iceberg/types/PruneColumns.java     |   37 +-
 .../java/org/apache/iceberg/types/ReassignIds.java |    1 -
 .../main/java/org/apache/iceberg/types/Type.java   |    2 -
 .../java/org/apache/iceberg/types/TypeUtil.java    |  141 +-
 .../main/java/org/apache/iceberg/types/Types.java  |   54 +-
 .../java/org/apache/iceberg/util/BinaryUtil.java   |   21 +-
 .../java/org/apache/iceberg/util/ByteBuffers.java  |   19 +-
 .../org/apache/iceberg/util/CharSequenceSet.java   |   11 +-
 .../apache/iceberg/util/CharSequenceWrapper.java   |    5 +-
 .../org/apache/iceberg/util/ExceptionUtil.java     |   28 +-
 .../main/java/org/apache/iceberg/util/NaNUtil.java |    4 +-
 .../org/apache/iceberg/util/StructProjection.java  |   56 +-
 .../java/org/apache/iceberg/util/UUIDUtil.java     |   18 +-
 .../java/org/apache/iceberg/util/UnicodeUtil.java  |   25 +-
 .../java/org/apache/iceberg/AssertHelpers.java     |  111 +-
 .../org/apache/iceberg/PartitionSpecTestBase.java  |   98 +-
 .../java/org/apache/iceberg/TestAccessors.java     |  104 +-
 .../test/java/org/apache/iceberg/TestHelpers.java  |  126 +-
 .../java/org/apache/iceberg/TestIcebergBuild.java  |   11 +-
 .../apache/iceberg/TestMetricsSerialization.java   |    4 +-
 .../org/apache/iceberg/TestPartitionPaths.java     |   34 +-
 .../iceberg/TestPartitionSpecValidation.java       |  278 ++-
 .../java/org/apache/iceberg/TestSnapshotRef.java   |   61 +-
 .../apache/iceberg/TestTransformSerialization.java |    7 +-
 .../org/apache/iceberg/catalog/TestNamespace.java  |    1 -
 .../iceberg/catalog/TestTableIdentifier.java       |    8 +-
 .../org/apache/iceberg/events/TestListeners.java   |    7 +-
 .../apache/iceberg/expressions/TestEvaluator.java  |  558 +++--
 .../iceberg/expressions/TestExpressionBinding.java |   64 +-
 .../iceberg/expressions/TestExpressionHelpers.java |  190 +-
 .../expressions/TestExpressionSerialization.java   |  109 +-
 .../iceberg/expressions/TestExpressionUtil.java    |  303 ++-
 .../TestInclusiveManifestEvaluator.java            |  579 +++--
 .../expressions/TestInclusiveMetricsEvaluator.java |  632 +++--
 .../expressions/TestLiteralSerialization.java      |   46 +-
 .../TestMetricsEvaluatorsNaNHandling.java          |  448 ++--
 .../expressions/TestMiscLiteralConversions.java    |  125 +-
 .../expressions/TestNumericLiteralConversions.java |  112 +-
 .../iceberg/expressions/TestPredicateBinding.java  |  452 ++--
 .../expressions/TestStrictMetricsEvaluator.java    |  463 ++--
 .../expressions/TestStringLiteralConversions.java  |   87 +-
 .../org/apache/iceberg/io/TestCloseableGroup.java  |   13 +-
 .../apache/iceberg/io/TestCloseableIterable.java   |   22 +-
 .../org/apache/iceberg/io/TestClosingIterator.java |    5 +-
 .../iceberg/io/TestableCloseableIterable.java      |    1 -
 .../apache/iceberg/transforms/TestBucketing.java   |  168 +-
 .../transforms/TestBucketingProjection.java        |  209 +-
 .../org/apache/iceberg/transforms/TestDates.java   |  103 +-
 .../iceberg/transforms/TestDatesProjection.java    |  341 ++-
 .../apache/iceberg/transforms/TestIdentity.java    |   41 +-
 .../iceberg/transforms/TestNotStartsWith.java      |  156 +-
 .../apache/iceberg/transforms/TestProjection.java  |  375 +--
 .../apache/iceberg/transforms/TestResiduals.java   |  263 +-
 .../apache/iceberg/transforms/TestStartsWith.java  |   45 +-
 .../apache/iceberg/transforms/TestTimestamps.java  |  137 +-
 .../transforms/TestTimestampsProjection.java       |  654 +++--
 .../apache/iceberg/transforms/TestTruncate.java    |   20 +-
 .../transforms/TestTruncatesProjection.java        |  284 ++-
 .../iceberg/transforms/TestTruncatesResiduals.java |   40 +-
 .../apache/iceberg/types/TestBinaryComparator.java |   23 +-
 .../iceberg/types/TestCharSeqComparator.java       |   41 +-
 .../iceberg/types/TestComparableComparator.java    |   16 +-
 .../org/apache/iceberg/types/TestComparators.java  |   15 +-
 .../org/apache/iceberg/types/TestConversions.java  |  101 +-
 .../iceberg/types/TestReadabilityChecks.java       |  421 ++--
 .../iceberg/types/TestSerializableTypes.java       |  189 +-
 .../org/apache/iceberg/types/TestTypeUtil.java     |  689 +++---
 .../java/org/apache/iceberg/util/RandomUtil.java   |   10 +-
 .../apache/iceberg/util/TestCharSequenceSet.java   |   15 -
 .../org/apache/iceberg/util/TestExceptionUtil.java |  126 +-
 .../org/apache/iceberg/arrow/ArrowAllocation.java  |    4 +-
 .../org/apache/iceberg/arrow/ArrowSchemaUtil.java  |   18 +-
 .../iceberg/arrow/vectorized/ArrowBatchReader.java |   11 +-
 .../iceberg/arrow/vectorized/ArrowReader.java      |  256 +-
 .../arrow/vectorized/ArrowVectorAccessor.java      |    1 -
 .../arrow/vectorized/ArrowVectorAccessors.java     |   23 +-
 .../iceberg/arrow/vectorized/BaseBatchReader.java  |   12 +-
 .../iceberg/arrow/vectorized/ColumnVector.java     |   40 +-
 .../iceberg/arrow/vectorized/ColumnarBatch.java    |   42 +-
 .../GenericArrowVectorAccessorFactory.java         |  285 ++-
 .../arrow/vectorized/NullabilityHolder.java        |   14 +-
 .../iceberg/arrow/vectorized/VectorHolder.java     |   18 +-
 .../arrow/vectorized/VectorizedArrowReader.java    |  137 +-
 .../arrow/vectorized/VectorizedReaderBuilder.java  |   29 +-
 .../vectorized/VectorizedTableScanIterable.java    |   15 +-
 .../parquet/BaseVectorizedParquetValuesReader.java |   76 +-
 .../vectorized/parquet/DecimalVectorUtil.java      |   21 +-
 .../parquet/VectorizedColumnIterator.java          |  143 +-
 ...orizedDictionaryEncodedParquetValuesReader.java |   66 +-
 .../vectorized/parquet/VectorizedPageIterator.java |  352 ++-
 .../VectorizedParquetDefinitionLevelReader.java    |  422 +++-
 .../apache/iceberg/arrow/ArrowSchemaUtilTest.java  |   59 +-
 .../iceberg/arrow/vectorized/ArrowReaderTest.java  |  901 ++++---
 .../vectorized/parquet/DecimalVectorUtilTest.java  |    5 +-
 .../iceberg/aws/AssumeRoleAwsClientFactory.java    |   45 +-
 .../org/apache/iceberg/aws/AwsClientFactories.java |   63 +-
 .../org/apache/iceberg/aws/AwsClientFactory.java   |   11 +-
 .../java/org/apache/iceberg/aws/AwsProperties.java |  397 +--
 .../iceberg/aws/dynamodb/DynamoDbCatalog.java      |  358 +--
 .../iceberg/aws/dynamodb/DynamoDbLockManager.java  |  196 +-
 .../aws/dynamodb/DynamoDbTableOperations.java      |   93 +-
 .../apache/iceberg/aws/glue/DynamoLockManager.java |   15 +-
 .../org/apache/iceberg/aws/glue/GlueCatalog.java   |  337 ++-
 .../iceberg/aws/glue/GlueTableOperations.java      |  142 +-
 .../iceberg/aws/glue/GlueToIcebergConverter.java   |   14 +-
 .../iceberg/aws/glue/IcebergToGlueConverter.java   |  172 +-
 .../LakeFormationAwsClientFactory.java             |   79 +-
 .../java/org/apache/iceberg/aws/s3/BaseS3File.java |    7 +-
 .../java/org/apache/iceberg/aws/s3/S3FileIO.java   |  138 +-
 .../org/apache/iceberg/aws/s3/S3InputFile.java     |   36 +-
 .../org/apache/iceberg/aws/s3/S3InputStream.java   |   22 +-
 .../org/apache/iceberg/aws/s3/S3OutputFile.java    |   16 +-
 .../org/apache/iceberg/aws/s3/S3OutputStream.java  |  211 +-
 .../org/apache/iceberg/aws/s3/S3RequestUtil.java   |   76 +-
 .../main/java/org/apache/iceberg/aws/s3/S3URI.java |   61 +-
 .../apache/iceberg/aws/TestAwsClientFactories.java |   56 +-
 .../org/apache/iceberg/aws/TestAwsProperties.java  |   26 +-
 .../apache/iceberg/aws/glue/TestGlueCatalog.java   |  559 +++--
 .../aws/glue/TestGlueToIcebergConverter.java       |   44 +-
 .../aws/glue/TestIcebergToGlueConverter.java       |  257 +-
 .../org/apache/iceberg/aws/s3/TestS3FileIO.java    |  100 +-
 .../apache/iceberg/aws/s3/TestS3InputStream.java   |   23 +-
 .../apache/iceberg/aws/s3/TestS3OutputStream.java  |  181 +-
 .../apache/iceberg/aws/s3/TestS3RequestUtil.java   |   37 +-
 .../java/org/apache/iceberg/aws/s3/TestS3URI.java  |   18 +-
 baseline.gradle                                    |   14 +-
 build.gradle                                       |    2 +-
 .../main/java/org/apache/iceberg/GuavaClasses.java |    6 +-
 .../java/org/apache/iceberg/common/DynClasses.java |   29 +-
 .../org/apache/iceberg/common/DynConstructors.java |   50 +-
 .../java/org/apache/iceberg/common/DynFields.java  |   76 +-
 .../java/org/apache/iceberg/common/DynMethods.java |  147 +-
 .../iceberg/util/ZOrderByteUtilsBenchmark.java     |   14 +-
 .../java/org/apache/iceberg/AllDataFilesTable.java |   16 +-
 .../org/apache/iceberg/AllDeleteFilesTable.java    |   16 +-
 .../java/org/apache/iceberg/AllEntriesTable.java   |   24 +-
 .../java/org/apache/iceberg/AllFilesTable.java     |   16 +-
 .../java/org/apache/iceberg/AllManifestsTable.java |  176 +-
 .../apache/iceberg/BaseAllMetadataTableScan.java   |   26 +-
 .../org/apache/iceberg/BaseCombinedScanTask.java   |    5 +-
 .../src/main/java/org/apache/iceberg/BaseFile.java |   75 +-
 .../java/org/apache/iceberg/BaseFileScanTask.java  |   20 +-
 .../java/org/apache/iceberg/BaseFilesTable.java    |   84 +-
 .../apache/iceberg/BaseIncrementalAppendScan.java  |  123 +-
 .../java/org/apache/iceberg/BaseMetadataTable.java |   28 +-
 .../org/apache/iceberg/BaseMetadataTableScan.java  |   28 +-
 .../org/apache/iceberg/BaseMetastoreCatalog.java   |   31 +-
 .../iceberg/BaseMetastoreTableOperations.java      |  189 +-
 .../org/apache/iceberg/BaseOverwriteFiles.java     |   21 +-
 .../org/apache/iceberg/BaseReplacePartitions.java  |    5 +-
 .../org/apache/iceberg/BaseReplaceSortOrder.java   |   22 +-
 .../java/org/apache/iceberg/BaseRewriteFiles.java  |   27 +-
 .../org/apache/iceberg/BaseRewriteManifests.java   |  122 +-
 .../main/java/org/apache/iceberg/BaseRowDelta.java |   10 +-
 .../src/main/java/org/apache/iceberg/BaseScan.java |   42 +-
 .../main/java/org/apache/iceberg/BaseSnapshot.java |  155 +-
 .../main/java/org/apache/iceberg/BaseTable.java    |   13 +-
 .../java/org/apache/iceberg/BaseTableScan.java     |   47 +-
 .../java/org/apache/iceberg/BaseTransaction.java   |  169 +-
 .../apache/iceberg/BaseUpdatePartitionSpec.java    |  134 +-
 .../java/org/apache/iceberg/CachingCatalog.java    |   75 +-
 .../java/org/apache/iceberg/CatalogProperties.java |   30 +-
 .../main/java/org/apache/iceberg/CatalogUtil.java  |  214 +-
 .../org/apache/iceberg/CherryPickOperation.java    |   67 +-
 .../main/java/org/apache/iceberg/ClientPool.java   |    1 -
 .../java/org/apache/iceberg/ClientPoolImpl.java    |    4 +-
 .../apache/iceberg/CommitCallbackTransaction.java  |    1 -
 .../main/java/org/apache/iceberg/DataFiles.java    |   53 +-
 .../java/org/apache/iceberg/DataFilesTable.java    |    8 +-
 .../java/org/apache/iceberg/DataTableScan.java     |   73 +-
 .../java/org/apache/iceberg/DeleteFileIndex.java   |  249 +-
 .../java/org/apache/iceberg/DeleteFilesTable.java  |   11 +-
 .../org/apache/iceberg/DoubleFieldMetrics.java     |   20 +-
 .../main/java/org/apache/iceberg/FastAppend.java   |   55 +-
 .../main/java/org/apache/iceberg/FieldMetrics.java |   46 +-
 .../main/java/org/apache/iceberg/FileMetadata.java |   39 +-
 .../main/java/org/apache/iceberg/FilesTable.java   |    8 +-
 .../main/java/org/apache/iceberg/FindFiles.java    |   66 +-
 .../java/org/apache/iceberg/FloatFieldMetrics.java |   20 +-
 .../java/org/apache/iceberg/GenericDataFile.java   |   54 +-
 .../java/org/apache/iceberg/GenericDeleteFile.java |   56 +-
 .../org/apache/iceberg/GenericManifestEntry.java   |   13 +-
 .../org/apache/iceberg/GenericManifestFile.java    |   81 +-
 .../iceberg/GenericPartitionFieldSummary.java      |   32 +-
 .../org/apache/iceberg/HasTableOperations.java     |    5 +-
 .../main/java/org/apache/iceberg/HistoryTable.java |   37 +-
 .../apache/iceberg/IncrementalDataTableScan.java   |  106 +-
 .../java/org/apache/iceberg/IndexedStructLike.java |    5 +-
 .../org/apache/iceberg/InheritableMetadata.java    |    1 -
 .../apache/iceberg/InheritableMetadataFactory.java |   16 +-
 .../java/org/apache/iceberg/IsolationLevel.java    |   26 +-
 .../java/org/apache/iceberg/LocationProviders.java |   51 +-
 .../org/apache/iceberg/ManifestEntriesTable.java   |   56 +-
 .../java/org/apache/iceberg/ManifestEntry.java     |   26 +-
 .../java/org/apache/iceberg/ManifestFiles.java     |  134 +-
 .../org/apache/iceberg/ManifestFilterManager.java  |  211 +-
 .../java/org/apache/iceberg/ManifestGroup.java     |  190 +-
 .../org/apache/iceberg/ManifestListWriter.java     |   29 +-
 .../java/org/apache/iceberg/ManifestLists.java     |   44 +-
 .../org/apache/iceberg/ManifestMergeManager.java   |   77 +-
 .../java/org/apache/iceberg/ManifestReader.java    |  132 +-
 .../java/org/apache/iceberg/ManifestWriter.java    |   58 +-
 .../java/org/apache/iceberg/ManifestsTable.java    |   85 +-
 .../main/java/org/apache/iceberg/MergeAppend.java  |   16 +-
 .../apache/iceberg/MergingSnapshotProducer.java    |  483 ++--
 .../java/org/apache/iceberg/MetadataColumns.java   |   78 +-
 .../java/org/apache/iceberg/MetadataTableType.java |    1 -
 .../org/apache/iceberg/MetadataTableUtils.java     |   35 +-
 .../java/org/apache/iceberg/MetadataUpdate.java    |   25 +-
 .../org/apache/iceberg/MetadataUpdateParser.java   |  150 +-
 .../java/org/apache/iceberg/MetricsConfig.java     |   92 +-
 .../main/java/org/apache/iceberg/MetricsModes.java |   32 +-
 .../main/java/org/apache/iceberg/MetricsUtil.java  |   19 +-
 .../main/java/org/apache/iceberg/MicroBatches.java |  120 +-
 .../java/org/apache/iceberg/PartitionData.java     |   26 +-
 .../org/apache/iceberg/PartitionSpecParser.java    |   30 +-
 .../java/org/apache/iceberg/PartitionSummary.java  |    5 +-
 .../main/java/org/apache/iceberg/Partitioning.java |  172 +-
 .../java/org/apache/iceberg/PartitionsTable.java   |  115 +-
 .../java/org/apache/iceberg/PropertiesUpdate.java  |   36 +-
 .../java/org/apache/iceberg/ReachableFileUtil.java |   17 +-
 .../java/org/apache/iceberg/RemoveSnapshots.java   |  349 +--
 .../org/apache/iceberg/RollbackToSnapshot.java     |    1 -
 .../org/apache/iceberg/RowLevelOperationMode.java  |   27 +-
 .../main/java/org/apache/iceberg/ScanSummary.java  |   98 +-
 .../main/java/org/apache/iceberg/SchemaParser.java |   50 +-
 .../main/java/org/apache/iceberg/SchemaUpdate.java |  280 ++-
 .../apache/iceberg/SerializableByteBufferMap.java  |    8 +-
 .../java/org/apache/iceberg/SerializableTable.java |   37 +-
 .../main/java/org/apache/iceberg/SetLocation.java  |    7 +-
 .../org/apache/iceberg/SetSnapshotOperation.java   |   88 +-
 .../apache/iceberg/SnapshotIdGeneratorUtil.java    |    4 +-
 .../java/org/apache/iceberg/SnapshotManager.java   |   11 +-
 .../java/org/apache/iceberg/SnapshotParser.java    |   36 +-
 .../java/org/apache/iceberg/SnapshotProducer.java  |  288 ++-
 .../java/org/apache/iceberg/SnapshotRefParser.java |   25 +-
 .../java/org/apache/iceberg/SnapshotSummary.java   |   51 +-
 .../java/org/apache/iceberg/SnapshotsTable.java    |   49 +-
 .../java/org/apache/iceberg/SortOrderParser.java   |   28 +-
 .../java/org/apache/iceberg/StaticDataTask.java    |   30 +-
 .../org/apache/iceberg/StaticTableOperations.java  |   20 +-
 .../java/org/apache/iceberg/StaticTableScan.java   |   21 +-
 .../java/org/apache/iceberg/StreamingDelete.java   |    6 +-
 .../java/org/apache/iceberg/SystemProperties.java  |   12 +-
 .../java/org/apache/iceberg/TableMetadata.java     |  489 ++--
 .../org/apache/iceberg/TableMetadataParser.java    |  129 +-
 .../java/org/apache/iceberg/TableOperations.java   |   57 +-
 .../java/org/apache/iceberg/TableProperties.java   |  141 +-
 .../java/org/apache/iceberg/TableScanContext.java  |  221 +-
 .../main/java/org/apache/iceberg/Transactions.java |   14 +-
 .../iceberg/UpdateSnapshotReferencesOperation.java |   34 +-
 .../main/java/org/apache/iceberg/V1Metadata.java   |   45 +-
 .../main/java/org/apache/iceberg/V2Metadata.java   |   93 +-
 .../org/apache/iceberg/actions/BaseAction.java     |    1 -
 .../actions/BaseDeleteOrphanFilesActionResult.java |    1 -
 .../BaseDeleteReachableFilesActionResult.java      |   10 +-
 .../actions/BaseExpireSnapshotsActionResult.java   |   17 +-
 .../actions/BaseFileGroupRewriteResult.java        |    4 +-
 .../actions/BaseMigrateTableActionResult.java      |    1 -
 .../actions/BaseRewriteDataFilesAction.java        |  153 +-
 .../actions/BaseRewriteDataFilesFileGroupInfo.java |    4 +-
 .../actions/BaseRewriteDataFilesResult.java        |    1 -
 .../actions/BaseRewriteManifestsActionResult.java  |    5 +-
 .../actions/BaseSnapshotTableActionResult.java     |    1 -
 .../iceberg/actions/BaseSnapshotUpdateAction.java  |    5 +-
 .../apache/iceberg/actions/BinPackStrategy.java    |  234 +-
 .../actions/ConvertEqualityDeleteStrategy.java     |   27 +-
 .../actions/RewriteDataFilesActionResult.java      |    4 +-
 .../actions/RewriteDataFilesCommitManager.java     |  155 +-
 .../apache/iceberg/actions/RewriteFileGroup.java   |    9 +-
 .../actions/RewritePositionDeleteStrategy.java     |   27 +-
 .../apache/iceberg/actions/RewriteStrategy.java    |   27 +-
 .../iceberg/actions/SnapshotUpdateAction.java      |    1 -
 .../org/apache/iceberg/actions/SortStrategy.java   |   38 +-
 .../main/java/org/apache/iceberg/avro/Avro.java    |  145 +-
 .../iceberg/avro/AvroCustomOrderSchemaVisitor.java |    5 +-
 .../org/apache/iceberg/avro/AvroEncoderUtil.java   |   11 +-
 .../org/apache/iceberg/avro/AvroFileAppender.java  |   29 +-
 .../main/java/org/apache/iceberg/avro/AvroIO.java  |   35 +-
 .../java/org/apache/iceberg/avro/AvroIterable.java |   13 +-
 .../java/org/apache/iceberg/avro/AvroMetrics.java  |   11 +-
 .../org/apache/iceberg/avro/AvroSchemaUtil.java    |  160 +-
 .../org/apache/iceberg/avro/AvroSchemaVisitor.java |    5 +-
 .../iceberg/avro/AvroSchemaWithTypeVisitor.java    |   26 +-
 .../avro/AvroWithPartnerByStructureVisitor.java    |   56 +-
 .../apache/iceberg/avro/BuildAvroProjection.java   |   82 +-
 .../org/apache/iceberg/avro/GenericAvroReader.java |    7 +-
 .../org/apache/iceberg/avro/GenericAvroWriter.java |   14 +-
 .../main/java/org/apache/iceberg/avro/HasIds.java  |   11 +-
 .../java/org/apache/iceberg/avro/LogicalMap.java   |   13 +-
 .../iceberg/avro/MetricsAwareDatumWriter.java      |    9 +-
 .../java/org/apache/iceberg/avro/MissingIds.java   |   20 +-
 .../apache/iceberg/avro/ProjectionDatumReader.java |   10 +-
 .../java/org/apache/iceberg/avro/PruneColumns.java |   74 +-
 .../java/org/apache/iceberg/avro/RemoveIds.java    |    7 +-
 .../java/org/apache/iceberg/avro/SchemaToType.java |   24 +-
 .../apache/iceberg/avro/SupportsRowPosition.java   |    4 +-
 .../java/org/apache/iceberg/avro/TypeToSchema.java |   60 +-
 .../org/apache/iceberg/avro/UUIDConversion.java    |    1 -
 .../java/org/apache/iceberg/avro/ValueReader.java  |    1 -
 .../java/org/apache/iceberg/avro/ValueReaders.java |  116 +-
 .../java/org/apache/iceberg/avro/ValueWriter.java  |    1 -
 .../java/org/apache/iceberg/avro/ValueWriters.java |   83 +-
 .../apache/iceberg/catalog/BaseSessionCatalog.java |   12 +-
 .../iceberg/catalog/TableIdentifierParser.java     |   40 +-
 .../org/apache/iceberg/data/GenericRecord.java     |   20 +-
 .../iceberg/data/IdentityPartitionConverters.java  |    9 +-
 .../org/apache/iceberg/data/avro/DataReader.java   |   31 +-
 .../org/apache/iceberg/data/avro/DataWriter.java   |   11 +-
 .../apache/iceberg/data/avro/DecoderResolver.java  |   23 +-
 .../apache/iceberg/data/avro/GenericReaders.java   |   22 +-
 .../apache/iceberg/data/avro/GenericWriters.java   |   16 +-
 .../apache/iceberg/data/avro/IcebergDecoder.java   |   82 +-
 .../apache/iceberg/data/avro/IcebergEncoder.java   |   20 +-
 .../iceberg/deletes/BitmapPositionDeleteIndex.java |    1 -
 .../java/org/apache/iceberg/deletes/Deletes.java   |  135 +-
 .../iceberg/deletes/EqualityDeleteWriter.java      |   34 +-
 .../org/apache/iceberg/deletes/PositionDelete.java |    4 +-
 .../iceberg/deletes/PositionDeleteIndex.java       |    8 +-
 .../iceberg/deletes/PositionDeleteWriter.java      |   35 +-
 .../iceberg/encryption/BaseEncryptedInputFile.java |    1 -
 .../encryption/BaseEncryptedOutputFile.java        |    1 -
 .../encryption/BaseEncryptionKeyMetadata.java      |    4 +-
 .../org/apache/iceberg/encryption/Ciphers.java     |   35 +-
 .../apache/iceberg/encryption/EncryptedFiles.java  |   19 +-
 .../iceberg/encryption/EncryptionAlgorithm.java    |   51 +-
 .../iceberg/encryption/EncryptionKeyMetadatas.java |    4 +-
 .../iceberg/encryption/InputFilesDecryptor.java    |   16 +-
 .../encryption/NativeFileCryptoParameters.java     |   13 +-
 .../iceberg/encryption/NativelyEncryptedFile.java  |    7 +-
 .../encryption/PlaintextEncryptionManager.java     |    4 +-
 .../apache/iceberg/events/CreateSnapshotEvent.java |    7 +-
 .../org/apache/iceberg/expressions/Zorder.java     |    1 -
 .../apache/iceberg/hadoop/ConfigProperties.java    |    4 +-
 .../org/apache/iceberg/hadoop/Configurable.java    |    5 +-
 .../org/apache/iceberg/hadoop/HadoopCatalog.java   |   77 +-
 .../apache/iceberg/hadoop/HadoopConfigurable.java  |   26 +-
 .../org/apache/iceberg/hadoop/HadoopFileIO.java    |   27 +-
 .../org/apache/iceberg/hadoop/HadoopInputFile.java |   19 +-
 .../iceberg/hadoop/HadoopMetricsContext.java       |   24 +-
 .../apache/iceberg/hadoop/HadoopOutputFile.java    |    5 +-
 .../org/apache/iceberg/hadoop/HadoopStreams.java   |   24 +-
 .../iceberg/hadoop/HadoopTableOperations.java      |   76 +-
 .../org/apache/iceberg/hadoop/HadoopTables.java    |   63 +-
 .../apache/iceberg/hadoop/HiddenPathFilter.java    |    8 +-
 .../iceberg/hadoop/SerializableConfiguration.java  |    5 +-
 .../main/java/org/apache/iceberg/hadoop/Util.java  |   14 +-
 .../apache/iceberg/io/BasePositionDeltaWriter.java |   11 +-
 .../java/org/apache/iceberg/io/BaseTaskWriter.java |   36 +-
 .../apache/iceberg/io/ByteBufferInputStream.java   |    4 +-
 .../org/apache/iceberg/io/ClusteredDataWriter.java |   15 +-
 .../iceberg/io/ClusteredEqualityDeleteWriter.java  |   19 +-
 .../iceberg/io/ClusteredPositionDeleteWriter.java  |   22 +-
 .../org/apache/iceberg/io/ClusteredWriter.java     |   31 +-
 .../org/apache/iceberg/io/DataWriteResult.java     |    7 +-
 .../java/org/apache/iceberg/io/DataWriter.java     |   41 +-
 .../org/apache/iceberg/io/DeleteSchemaUtil.java    |   12 +-
 .../org/apache/iceberg/io/DeleteWriteResult.java   |    7 +-
 .../org/apache/iceberg/io/EqualityDeltaWriter.java |   16 +-
 .../org/apache/iceberg/io/FanoutDataWriter.java    |   15 +-
 .../java/org/apache/iceberg/io/FanoutWriter.java   |   30 +-
 .../org/apache/iceberg/io/FileAppenderFactory.java |   22 +-
 .../java/org/apache/iceberg/io/FileIOParser.java   |   15 +-
 .../java/org/apache/iceberg/io/FileWriter.java     |   16 +-
 .../org/apache/iceberg/io/FileWriterFactory.java   |   11 +-
 .../main/java/org/apache/iceberg/io/IOUtil.java    |   18 +-
 .../apache/iceberg/io/MultiBufferInputStream.java  |    7 +-
 .../org/apache/iceberg/io/OutputFileFactory.java   |   55 +-
 .../apache/iceberg/io/PartitionedFanoutWriter.java |   17 +-
 .../org/apache/iceberg/io/PartitionedWriter.java   |   14 +-
 .../org/apache/iceberg/io/PartitioningWriter.java  |   20 +-
 .../org/apache/iceberg/io/PositionDeltaWriter.java |   21 +-
 .../org/apache/iceberg/io/ResolvingFileIO.java     |   37 +-
 .../org/apache/iceberg/io/RollingDataWriter.java   |   11 +-
 .../iceberg/io/RollingEqualityDeleteWriter.java    |   21 +-
 .../org/apache/iceberg/io/RollingFileWriter.java   |   19 +-
 .../iceberg/io/RollingPositionDeleteWriter.java    |   15 +-
 .../apache/iceberg/io/SingleBufferInputStream.java |    8 +-
 .../apache/iceberg/io/SortedPosDeleteWriter.java   |   36 +-
 .../java/org/apache/iceberg/io/StructCopy.java     |    5 +-
 .../java/org/apache/iceberg/io/TaskWriter.java     |   11 +-
 .../org/apache/iceberg/io/UnpartitionedWriter.java |   10 +-
 .../java/org/apache/iceberg/io/WriteResult.java    |    6 +-
 .../java/org/apache/iceberg/jdbc/JdbcCatalog.java  |  363 +--
 .../org/apache/iceberg/jdbc/JdbcClientPool.java    |   10 +-
 .../apache/iceberg/jdbc/JdbcTableOperations.java   |  128 +-
 .../java/org/apache/iceberg/jdbc/JdbcUtil.java     |  308 ++-
 .../jdbc/UncheckedInterruptedException.java        |    1 -
 .../apache/iceberg/jdbc/UncheckedSQLException.java |    1 -
 .../org/apache/iceberg/mapping/MappedField.java    |   18 +-
 .../org/apache/iceberg/mapping/MappedFields.java   |   32 +-
 .../org/apache/iceberg/mapping/MappingUtil.java    |   68 +-
 .../org/apache/iceberg/mapping/NameMapping.java    |    5 +-
 .../apache/iceberg/mapping/NameMappingParser.java  |   11 +-
 .../main/java/org/apache/iceberg/puffin/Blob.java  |   15 +-
 .../org/apache/iceberg/puffin/BlobMetadata.java    |   27 +-
 .../org/apache/iceberg/puffin/FileMetadata.java    |    1 -
 .../apache/iceberg/puffin/FileMetadataParser.java  |   24 +-
 .../java/org/apache/iceberg/puffin/Puffin.java     |   43 +-
 .../iceberg/puffin/PuffinCompressionCodec.java     |   33 +-
 .../org/apache/iceberg/puffin/PuffinFormat.java    |   47 +-
 .../org/apache/iceberg/puffin/PuffinReader.java    |   87 +-
 .../org/apache/iceberg/puffin/PuffinWriter.java    |   32 +-
 .../apache/iceberg/puffin/StandardBlobTypes.java   |    7 +-
 .../iceberg/puffin/StandardPuffinProperties.java   |    8 +-
 .../org/apache/iceberg/rest/CatalogHandlers.java   |  185 +-
 .../org/apache/iceberg/rest/ErrorHandlers.java     |   21 +-
 .../java/org/apache/iceberg/rest/HTTPClient.java   |  101 +-
 .../org/apache/iceberg/rest/HTTPClientFactory.java |   16 +-
 .../java/org/apache/iceberg/rest/RESTCatalog.java  |   62 +-
 .../java/org/apache/iceberg/rest/RESTClient.java   |   92 +-
 .../java/org/apache/iceberg/rest/RESTMessage.java  |    9 +-
 .../org/apache/iceberg/rest/RESTObjectMapper.java  |    4 +-
 .../java/org/apache/iceberg/rest/RESTRequest.java  |    8 +-
 .../java/org/apache/iceberg/rest/RESTResponse.java |    8 +-
 .../org/apache/iceberg/rest/RESTSerializers.java   |   34 +-
 .../apache/iceberg/rest/RESTSessionCatalog.java    |  339 ++-
 .../apache/iceberg/rest/RESTTableOperations.java   |   46 +-
 .../java/org/apache/iceberg/rest/RESTUtil.java     |   85 +-
 .../org/apache/iceberg/rest/ResourcePaths.java     |    7 +-
 .../apache/iceberg/rest/auth/OAuth2Properties.java |   25 +-
 .../org/apache/iceberg/rest/auth/OAuth2Util.java   |  181 +-
 .../rest/requests/CreateNamespaceRequest.java      |   14 +-
 .../iceberg/rest/requests/CreateTableRequest.java  |   27 +-
 .../iceberg/rest/requests/RenameTableRequest.java  |    8 +-
 .../requests/UpdateNamespacePropertiesRequest.java |   34 +-
 .../rest/requests/UpdateRequirementParser.java     |   93 +-
 .../iceberg/rest/requests/UpdateTableRequest.java  |    7 +-
 .../iceberg/rest/responses/ConfigResponse.java     |   53 +-
 .../rest/responses/CreateNamespaceResponse.java    |   21 +-
 .../iceberg/rest/responses/ErrorResponse.java      |    8 +-
 .../rest/responses/ErrorResponseParser.java        |   17 +-
 .../rest/responses/GetNamespaceResponse.java       |   17 +-
 .../rest/responses/ListNamespacesResponse.java     |   11 +-
 .../iceberg/rest/responses/ListTablesResponse.java |   14 +-
 .../iceberg/rest/responses/LoadTableResponse.java  |   17 +-
 .../iceberg/rest/responses/OAuthTokenResponse.java |   21 +-
 .../UpdateNamespacePropertiesResponse.java         |   21 +-
 .../iceberg/schema/SchemaWithPartnerVisitor.java   |   23 +-
 .../apache/iceberg/schema/UnionByNameVisitor.java  |   52 +-
 .../java/org/apache/iceberg/types/FixupTypes.java  |    1 -
 .../java/org/apache/iceberg/util/ArrayUtil.java    |  115 +-
 .../java/org/apache/iceberg/util/BinPacking.java   |   40 +-
 .../apache/iceberg/util/CopySortOrderFields.java   |   11 +-
 .../java/org/apache/iceberg/util/DateTimeUtil.java |   10 +-
 .../java/org/apache/iceberg/util/DecimalUtil.java  |   26 +-
 .../org/apache/iceberg/util/EnvironmentUtil.java   |   25 +-
 .../java/org/apache/iceberg/util/Exceptions.java   |    7 +-
 .../main/java/org/apache/iceberg/util/Filter.java  |    1 -
 .../java/org/apache/iceberg/util/JsonUtil.java     |   99 +-
 .../java/org/apache/iceberg/util/LocationUtil.java |    8 +-
 .../java/org/apache/iceberg/util/LockManagers.java |  116 +-
 .../org/apache/iceberg/util/ManifestFileUtil.java  |   24 +-
 .../main/java/org/apache/iceberg/util/Pair.java    |   38 +-
 .../org/apache/iceberg/util/ParallelIterable.java  |   39 +-
 .../java/org/apache/iceberg/util/PartitionSet.java |   14 +-
 .../org/apache/iceberg/util/PartitionUtil.java     |   27 +-
 .../java/org/apache/iceberg/util/PropertyUtil.java |   31 +-
 .../org/apache/iceberg/util/SerializableMap.java   |    1 -
 .../apache/iceberg/util/SerializableSupplier.java  |    1 -
 .../org/apache/iceberg/util/SerializationUtil.java |   22 +-
 .../java/org/apache/iceberg/util/SnapshotUtil.java |  186 +-
 .../org/apache/iceberg/util/SortOrderUtil.java     |   50 +-
 .../java/org/apache/iceberg/util/SortedMerge.java  |   18 +-
 .../org/apache/iceberg/util/StructLikeMap.java     |    5 +-
 .../org/apache/iceberg/util/StructLikeSet.java     |    5 +-
 .../org/apache/iceberg/util/StructLikeWrapper.java |   11 +-
 .../org/apache/iceberg/util/TableScanUtil.java     |   83 +-
 .../main/java/org/apache/iceberg/util/Tasks.java   |  195 +-
 .../java/org/apache/iceberg/util/ThreadPools.java  |   32 +-
 .../main/java/org/apache/iceberg/util/WapUtil.java |   15 +-
 .../org/apache/iceberg/util/ZOrderByteUtils.java   |   94 +-
 .../org/apache/iceberg/LocalTableOperations.java   |   17 +-
 .../java/org/apache/iceberg/MockFileScanTask.java  |    1 -
 .../test/java/org/apache/iceberg/ScanTestBase.java |  152 +-
 .../iceberg/TableMetadataParserCodecTest.java      |    4 +-
 .../apache/iceberg/TableMetadataParserTest.java    |   16 +-
 .../java/org/apache/iceberg/TableTestBase.java     |  357 +--
 .../iceberg/TestBaseIncrementalAppendScan.java     |   60 +-
 .../iceberg/TestCatalogErrorConstructor.java       |   17 +-
 .../java/org/apache/iceberg/TestCatalogUtil.java   |   95 +-
 .../apache/iceberg/TestCreateSnapshotEvent.java    |   98 +-
 .../org/apache/iceberg/TestCreateTransaction.java  |  294 ++-
 .../java/org/apache/iceberg/TestDataTableScan.java |    1 -
 .../org/apache/iceberg/TestDeleteFileIndex.java    |  445 ++--
 .../java/org/apache/iceberg/TestDeleteFiles.java   |  262 +-
 .../apache/iceberg/TestEntriesMetadataTable.java   |   90 +-
 .../java/org/apache/iceberg/TestFastAppend.java    |  323 +--
 .../java/org/apache/iceberg/TestFilterFiles.java   |   69 +-
 .../java/org/apache/iceberg/TestFindFiles.java     |  129 +-
 .../TestFixedSizeSplitScanTaskIterator.java        |   24 +-
 .../org/apache/iceberg/TestFormatVersions.java     |   18 +-
 .../iceberg/TestIncrementalDataTableScan.java      |   86 +-
 .../org/apache/iceberg/TestLocationProvider.java   |  166 +-
 .../org/apache/iceberg/TestManifestCleanup.java    |   91 +-
 .../apache/iceberg/TestManifestListVersions.java   |  244 +-
 .../org/apache/iceberg/TestManifestReader.java     |   48 +-
 .../apache/iceberg/TestManifestReaderStats.java    |   72 +-
 .../org/apache/iceberg/TestManifestWriter.java     |   69 +-
 .../apache/iceberg/TestManifestWriterVersions.java |  145 +-
 .../java/org/apache/iceberg/TestMergeAppend.java   | 1055 ++++----
 .../apache/iceberg/TestMetadataTableFilters.java   |  473 ++--
 .../org/apache/iceberg/TestMetadataTableScans.java |  641 +++--
 .../apache/iceberg/TestMetadataUpdateParser.java   |  698 +++---
 .../test/java/org/apache/iceberg/TestMetrics.java  |  343 +--
 .../java/org/apache/iceberg/TestMetricsModes.java  |  146 +-
 .../org/apache/iceberg/TestMetricsTruncation.java  |  213 +-
 .../org/apache/iceberg/TestMicroBatchBuilder.java  |   84 +-
 .../TestOffsetsBasedSplitScanTaskIterator.java     |   37 +-
 .../java/org/apache/iceberg/TestOverwrite.java     |  251 +-
 .../iceberg/TestOverwriteWithValidation.java       | 1006 ++++----
 .../org/apache/iceberg/TestPartitionSpecInfo.java  |   29 +-
 .../apache/iceberg/TestPartitionSpecParser.java    |  116 +-
 .../java/org/apache/iceberg/TestPartitioning.java  |  148 +-
 .../org/apache/iceberg/TestRemoveSnapshots.java    | 1006 ++++----
 .../org/apache/iceberg/TestReplacePartitions.java  |  558 +++--
 .../org/apache/iceberg/TestReplaceTransaction.java |  315 +--
 .../java/org/apache/iceberg/TestRewriteFiles.java  |  535 ++--
 .../org/apache/iceberg/TestRewriteManifests.java   |  801 +++---
 .../test/java/org/apache/iceberg/TestRowDelta.java | 1147 +++++----
 .../apache/iceberg/TestScanDataFileColumns.java    |  104 +-
 .../java/org/apache/iceberg/TestScanSummary.java   |  108 +-
 .../iceberg/TestScansAndSchemaEvolution.java       |   49 +-
 .../apache/iceberg/TestSchemaAndMappingUpdate.java |  222 +-
 .../test/java/org/apache/iceberg/TestSchemaID.java |  101 +-
 .../apache/iceberg/TestSchemaUnionByFieldName.java |  444 +++-
 .../java/org/apache/iceberg/TestSchemaUpdate.java  | 2288 +++++++++--------
 .../iceberg/TestSequenceNumberForV2Table.java      |  189 +-
 .../test/java/org/apache/iceberg/TestSnapshot.java |   78 +-
 .../java/org/apache/iceberg/TestSnapshotJson.java  |  157 +-
 .../org/apache/iceberg/TestSnapshotManager.java    |  556 ++---
 .../org/apache/iceberg/TestSnapshotRefParser.java  |  158 +-
 .../org/apache/iceberg/TestSnapshotSelection.java  |   44 +-
 .../org/apache/iceberg/TestSnapshotSummary.java    |   24 +-
 .../java/org/apache/iceberg/TestSortOrder.java     |  217 +-
 .../org/apache/iceberg/TestSortOrderParser.java    |   29 +-
 .../java/org/apache/iceberg/TestSplitPlanning.java |   98 +-
 .../java/org/apache/iceberg/TestTableMetadata.java | 1630 ++++++++-----
 .../iceberg/TestTableMetadataSerialization.java    |   36 +-
 .../iceberg/TestTableUpdatePartitionSpec.java      |  231 +-
 .../test/java/org/apache/iceberg/TestTables.java   |   71 +-
 .../apache/iceberg/TestTimestampPartitions.java    |   47 +-
 .../java/org/apache/iceberg/TestTransaction.java   |  535 ++--
 .../apache/iceberg/TestUpdatePartitionSpec.java    |  774 +++---
 .../apache/iceberg/TestV1ToV2RowDeltaDelete.java   |  208 +-
 .../java/org/apache/iceberg/TestWapWorkflow.java   |  662 ++---
 .../org/apache/iceberg/TestableCachingCatalog.java |   16 +-
 .../java/org/apache/iceberg/V2TableTestBase.java   |    1 -
 .../iceberg/actions/TestBinPackStrategy.java       |  275 ++-
 .../apache/iceberg/actions/TestSortStrategy.java   |   80 +-
 .../java/org/apache/iceberg/avro/AvroDataTest.java |  197 +-
 .../org/apache/iceberg/avro/AvroTestHelpers.java   |   13 +-
 .../org/apache/iceberg/avro/RandomAvroData.java    |    7 +-
 .../apache/iceberg/avro/TestAvroDataWriter.java    |   43 +-
 .../apache/iceberg/avro/TestAvroDeleteWriters.java |  117 +-
 .../apache/iceberg/avro/TestAvroEncoderUtil.java   |    7 +-
 .../org/apache/iceberg/avro/TestAvroEnums.java     |   29 +-
 .../org/apache/iceberg/avro/TestAvroFileSplit.java |  125 +-
 .../apache/iceberg/avro/TestAvroNameMapping.java   |  331 ++-
 .../avro/TestAvroOptionsWithNonNullDefaults.java   |   78 +-
 .../iceberg/avro/TestAvroReadProjection.java       |   45 +-
 .../iceberg/avro/TestAvroSchemaProjection.java     |  133 +-
 .../iceberg/avro/TestBuildAvroProjection.java      |  478 ++--
 .../org/apache/iceberg/avro/TestGenericAvro.java   |   12 +-
 .../java/org/apache/iceberg/avro/TestHasIds.java   |   35 +-
 .../apache/iceberg/avro/TestReadProjection.java    |  519 ++--
 .../apache/iceberg/avro/TestSchemaConversions.java |  460 ++--
 .../org/apache/iceberg/catalog/CatalogTests.java   | 1354 ++++++-----
 .../iceberg/catalog/TestTableIdentifierParser.java |   84 +-
 .../apache/iceberg/deletes/TestEqualityFilter.java |  185 +-
 .../apache/iceberg/deletes/TestPositionFilter.java |  428 ++--
 .../org/apache/iceberg/encryption/TestCiphers.java |    1 -
 .../iceberg/encryption/kms/KeyStoreKmsClient.java  |   18 +-
 .../iceberg/encryption/kms/MemoryMockKMS.java      |   11 +-
 .../apache/iceberg/hadoop/HadoopFileIOTest.java    |   70 +-
 .../apache/iceberg/hadoop/HadoopTableTestBase.java |  134 +-
 .../apache/iceberg/hadoop/TestCachingCatalog.java  |  126 +-
 .../iceberg/hadoop/TestCatalogUtilDropTable.java   |   93 +-
 .../apache/iceberg/hadoop/TestHadoopCatalog.java   |  302 ++-
 .../apache/iceberg/hadoop/TestHadoopCommits.java   |  417 ++--
 .../apache/iceberg/hadoop/TestHadoopTables.java    |   75 +-
 .../org/apache/iceberg/hadoop/TestStaticTable.java |   40 +-
 .../iceberg/hadoop/TestTableSerialization.java     |   65 +-
 .../org/apache/iceberg/io/InMemoryInputFile.java   |   11 +-
 .../org/apache/iceberg/io/InMemoryOutputFile.java  |    4 +-
 .../org/apache/iceberg/io/MockInputStream.java     |    4 +-
 .../iceberg/io/TestByteBufferInputStreams.java     |  200 +-
 .../java/org/apache/iceberg/io/TestIOUtil.java     |   52 +-
 .../apache/iceberg/io/TestInMemoryInputFile.java   |    7 +-
 .../apache/iceberg/io/TestInMemoryOutputFile.java  |    4 +-
 .../iceberg/io/TestMultiBufferInputStream.java     |   33 +-
 .../apache/iceberg/io/TestOutputFileFactory.java   |   26 +-
 .../iceberg/io/TestSingleBufferInputStream.java    |   34 +-
 .../org/apache/iceberg/jdbc/TestJdbcCatalog.java   |  357 +--
 .../iceberg/jdbc/TestJdbcTableConcurrency.java     |   89 +-
 .../java/org/apache/iceberg/jdbc/TestJdbcUtil.java |    1 -
 .../apache/iceberg/mapping/TestMappingUpdates.java |  163 +-
 .../apache/iceberg/mapping/TestNameMapping.java    |  307 ++-
 .../iceberg/puffin/PuffinFormatTestUtil.java       |    4 +-
 .../iceberg/puffin/TestFileMetadataParser.java     |  145 +-
 .../apache/iceberg/puffin/TestPuffinFormat.java    |   18 +-
 .../apache/iceberg/puffin/TestPuffinReader.java    |   88 +-
 .../apache/iceberg/puffin/TestPuffinWriter.java    |   59 +-
 .../java/org/apache/iceberg/rest/HttpMethod.java   |    1 -
 .../apache/iceberg/rest/RESTCatalogAdapter.java    |  254 +-
 .../iceberg/rest/RequestResponseTestBase.java      |   41 +-
 .../org/apache/iceberg/rest/TestHTTPClient.java    |   91 +-
 .../org/apache/iceberg/rest/TestRESTCatalog.java   | 1086 +++++----
 .../java/org/apache/iceberg/rest/TestRESTUtil.java |   61 +-
 .../org/apache/iceberg/rest/TestResourcePaths.java |   13 +-
 .../apache/iceberg/rest/auth/TestOAuth2Util.java   |   28 +-
 .../rest/requests/TestCreateNamespaceRequest.java  |   59 +-
 .../rest/requests/TestCreateTableRequest.java      |  249 +-
 .../rest/requests/TestRenameTableRequest.java      |   63 +-
 .../TestUpdateNamespacePropertiesRequest.java      |   87 +-
 .../rest/requests/TestUpdateRequirementParser.java |  178 +-
 .../iceberg/rest/responses/TestConfigResponse.java |  116 +-
 .../responses/TestCreateNamespaceResponse.java     |   72 +-
 .../rest/responses/TestErrorResponseParser.java    |   81 +-
 .../rest/responses/TestGetNamespaceResponse.java   |   49 +-
 .../rest/responses/TestListNamespacesResponse.java |   45 +-
 .../rest/responses/TestListTablesResponse.java     |   50 +-
 .../rest/responses/TestLoadTableResponse.java      |  207 +-
 .../rest/responses/TestOAuthTokenResponse.java     |   19 +-
 .../TestUpdateNamespacePropertiesResponse.java     |  121 +-
 .../java/org/apache/iceberg/util/FakeTicker.java   |    8 +-
 .../org/apache/iceberg/util/TestBinPacking.java    |  215 +-
 .../apache/iceberg/util/TestEnvironmentUtil.java   |   17 +-
 .../iceberg/util/TestInMemoryLockManager.java      |  134 +-
 .../org/apache/iceberg/util/TestLocationUtil.java  |   29 +-
 .../org/apache/iceberg/util/TestLockManagers.java  |   12 +-
 .../apache/iceberg/util/TestReachableFileUtil.java |   71 +-
 .../org/apache/iceberg/util/TestSortOrderUtil.java |  373 ++-
 .../org/apache/iceberg/util/TestStructLikeMap.java |    9 +-
 .../org/apache/iceberg/util/TestStructLikeSet.java |    9 +-
 .../org/apache/iceberg/util/TestTableScanUtil.java |  122 +-
 .../apache/iceberg/util/TestZOrderByteUtil.java    |  235 +-
 .../apache/iceberg/GenericOrcReaderBenchmark.java  |    1 -
 .../iceberg/GenericParquetReaderBenchmark.java     |    1 -
 .../java/org/apache/iceberg/ReaderBenchmark.java   |   26 +-
 .../apache/iceberg/data/BaseFileWriterFactory.java |  216 +-
 .../java/org/apache/iceberg/data/DeleteFilter.java |  112 +-
 .../iceberg/data/GenericAppenderFactory.java       |   47 +-
 .../apache/iceberg/data/GenericDeleteFilter.java   |    4 +-
 .../org/apache/iceberg/data/GenericReader.java     |   64 +-
 .../org/apache/iceberg/data/IcebergGenerics.java   |    9 +-
 .../apache/iceberg/data/InternalRecordWrapper.java |    9 +-
 .../apache/iceberg/data/TableMigrationUtil.java    |  141 +-
 .../org/apache/iceberg/data/TableScanIterable.java |    1 -
 .../java/org/apache/iceberg/RecordWrapperTest.java |   96 +-
 .../apache/iceberg/TestGenericAppenderFactory.java |   14 +-
 .../org/apache/iceberg/TestMergingMetrics.java     |  220 +-
 .../java/org/apache/iceberg/TestSplitScan.java     |   42 +-
 .../java/org/apache/iceberg/data/DataTest.java     |  196 +-
 .../org/apache/iceberg/data/DataTestHelpers.java   |   19 +-
 .../org/apache/iceberg/data/DeleteReadTests.java   |  397 +--
 .../java/org/apache/iceberg/data/FileHelpers.java  |   50 +-
 .../apache/iceberg/data/GenericAppenderHelper.java |   20 +-
 .../org/apache/iceberg/data/RandomGenericData.java |   71 +-
 .../data/TestDataFileIndexStatsFilters.java        |  152 +-
 .../iceberg/data/TestGenericReaderDeletes.java     |    7 +-
 .../org/apache/iceberg/data/TestGenericRecord.java |   21 +-
 .../org/apache/iceberg/data/TestLocalScan.java     |  459 ++--
 .../iceberg/data/TestMetricsRowGroupFilter.java    |  273 ++-
 .../data/TestMetricsRowGroupFilterTypes.java       |  219 +-
 .../apache/iceberg/data/TestReadProjection.java    |  496 ++--
 .../apache/iceberg/data/avro/TestGenericData.java  |   21 +-
 .../data/avro/TestGenericReadProjection.java       |   19 +-
 .../data/avro/TestSingleMessageEncoding.java       |   62 +-
 .../apache/iceberg/data/orc/TestGenericData.java   |  117 +-
 .../data/orc/TestGenericReadProjection.java        |   24 +-
 .../apache/iceberg/data/orc/TestOrcDataWriter.java |   43 +-
 .../iceberg/data/orc/TestOrcRowIterator.java       |   49 +-
 .../iceberg/data/parquet/TestGenericData.java      |   59 +-
 .../data/parquet/TestGenericReadProjection.java    |   20 +-
 .../org/apache/iceberg/io/TestAppenderFactory.java |  178 +-
 .../org/apache/iceberg/io/TestBaseTaskWriter.java  |   46 +-
 .../apache/iceberg/io/TestFileWriterFactory.java   |  239 +-
 .../io/TestGenericSortedPosDeleteWriter.java       |  157 +-
 .../apache/iceberg/io/TestPartitioningWriters.java |  277 +--
 .../iceberg/io/TestPositionDeltaWriters.java       |  128 +-
 .../apache/iceberg/io/TestRollingFileWriters.java  |   58 +-
 .../iceberg/io/TestTaskEqualityDeltaWriter.java    |  256 +-
 .../org/apache/iceberg/io/TestWriterMetrics.java   |  157 +-
 .../java/org/apache/iceberg/io/WriterTestBase.java |   25 +-
 .../org/apache/iceberg/orc/TestOrcMetrics.java     |   48 +-
 .../iceberg/parquet/TestGenericMergingMetrics.java |    6 +-
 .../apache/iceberg/parquet/TestParquetMetrics.java |   39 +-
 .../apache/iceberg/dell/DellClientFactories.java   |   26 +-
 .../org/apache/iceberg/dell/DellClientFactory.java |    1 -
 .../org/apache/iceberg/dell/DellProperties.java    |   20 +-
 .../org/apache/iceberg/dell/ecs/BaseEcsFile.java   |    1 -
 .../iceberg/dell/ecs/EcsAppendOutputStream.java    |   63 +-
 .../org/apache/iceberg/dell/ecs/EcsCatalog.java    |  199 +-
 .../org/apache/iceberg/dell/ecs/EcsFileIO.java     |   21 +-
 .../org/apache/iceberg/dell/ecs/EcsInputFile.java  |   14 +-
 .../org/apache/iceberg/dell/ecs/EcsOutputFile.java |   21 +-
 .../iceberg/dell/ecs/EcsSeekableInputStream.java   |   24 +-
 .../iceberg/dell/ecs/EcsTableOperations.java       |   26 +-
 .../java/org/apache/iceberg/dell/ecs/EcsURI.java   |   25 +-
 .../iceberg/dell/ecs/PropertiesSerDesUtil.java     |   29 +-
 .../dell/ecs/TestEcsAppendOutputStream.java        |   53 +-
 .../apache/iceberg/dell/ecs/TestEcsCatalog.java    |   38 +-
 .../apache/iceberg/dell/ecs/TestEcsInputFile.java  |   17 +-
 .../apache/iceberg/dell/ecs/TestEcsOutputFile.java |   41 +-
 .../dell/ecs/TestEcsSeekableInputStream.java       |   48 +-
 .../iceberg/dell/ecs/TestEcsTableOperations.java   |   24 +-
 .../org/apache/iceberg/dell/ecs/TestEcsURI.java    |    1 -
 .../iceberg/dell/ecs/TestPropertiesSerDesUtil.java |    6 +-
 .../iceberg/dell/mock/MockDellClientFactory.java   |    9 +-
 .../iceberg/dell/mock/ecs/EcsS3MockRule.java       |   28 +-
 .../apache/iceberg/dell/mock/ecs/MockS3Client.java |   65 +-
 .../apache/iceberg/dell/mock/ecs/ObjectData.java   |    5 +-
 .../org/apache/iceberg/dell/mock/ecs/ObjectId.java |   11 +-
 .../iceberg/dell/mock/ecs/TestExceptionCode.java   |   28 +-
 .../org/apache/iceberg/flink/CatalogLoader.java    |   53 +-
 .../org/apache/iceberg/flink/FlinkCatalog.java     |  213 +-
 .../apache/iceberg/flink/FlinkCatalogFactory.java  |   76 +-
 .../org/apache/iceberg/flink/FlinkConfParser.java  |    1 -
 .../apache/iceberg/flink/FlinkConfigOptions.java   |   13 +-
 .../iceberg/flink/FlinkDynamicTableFactory.java    |   51 +-
 .../org/apache/iceberg/flink/FlinkFilters.java     |  162 +-
 .../org/apache/iceberg/flink/FlinkFixupTypes.java  |    5 +-
 .../org/apache/iceberg/flink/FlinkSchemaUtil.java  |   57 +-
 .../org/apache/iceberg/flink/FlinkTypeToType.java  |    8 +-
 .../org/apache/iceberg/flink/FlinkTypeVisitor.java |    1 -
 .../org/apache/iceberg/flink/FlinkWriteConf.java   |   64 +-
 .../apache/iceberg/flink/FlinkWriteOptions.java    |   24 +-
 .../org/apache/iceberg/flink/IcebergTableSink.java |   26 +-
 .../apache/iceberg/flink/IcebergTableSource.java   |   41 +-
 .../org/apache/iceberg/flink/RowDataWrapper.java   |   16 +-
 .../java/org/apache/iceberg/flink/TableLoader.java |   14 +-
 .../org/apache/iceberg/flink/TypeToFlinkType.java  |   14 +-
 .../org/apache/iceberg/flink/actions/Actions.java  |    9 +-
 .../flink/actions/RewriteDataFilesAction.java      |    4 +-
 .../flink/data/AvroWithFlinkSchemaVisitor.java     |   10 +-
 .../apache/iceberg/flink/data/FlinkAvroReader.java |   20 +-
 .../apache/iceberg/flink/data/FlinkAvroWriter.java |   35 +-
 .../apache/iceberg/flink/data/FlinkOrcReader.java  |   28 +-
 .../apache/iceberg/flink/data/FlinkOrcReaders.java |   51 +-
 .../apache/iceberg/flink/data/FlinkOrcWriter.java  |   48 +-
 .../apache/iceberg/flink/data/FlinkOrcWriters.java |   68 +-
 .../iceberg/flink/data/FlinkParquetReaders.java    |  126 +-
 .../iceberg/flink/data/FlinkParquetWriters.java    |  142 +-
 .../iceberg/flink/data/FlinkSchemaVisitor.java     |   15 +-
 .../iceberg/flink/data/FlinkValueReaders.java      |   23 +-
 .../iceberg/flink/data/FlinkValueWriters.java      |   44 +-
 .../flink/data/ParquetWithFlinkSchemaVisitor.java  |   89 +-
 .../iceberg/flink/data/RowDataProjection.java      |   78 +-
 .../org/apache/iceberg/flink/data/RowDataUtil.java |   17 +-
 .../iceberg/flink/sink/BaseDeltaTaskWriter.java    |   28 +-
 .../apache/iceberg/flink/sink/DeltaManifests.java  |    4 +-
 .../flink/sink/DeltaManifestsSerializer.java       |    4 +-
 .../flink/sink/EqualityFieldKeySelector.java       |   17 +-
 .../iceberg/flink/sink/FlinkAppenderFactory.java   |   64 +-
 .../iceberg/flink/sink/FlinkFileWriterFactory.java |  103 +-
 .../iceberg/flink/sink/FlinkManifestUtil.java      |   41 +-
 .../org/apache/iceberg/flink/sink/FlinkSink.java   |  323 ++-
 .../iceberg/flink/sink/IcebergFilesCommitter.java  |  196 +-
 .../iceberg/flink/sink/IcebergStreamWriter.java    |    4 +-
 .../flink/sink/ManifestOutputFileFactory.java      |   22 +-
 .../iceberg/flink/sink/PartitionKeySelector.java   |   10 +-
 .../iceberg/flink/sink/PartitionedDeltaWriter.java |   36 +-
 .../flink/sink/RowDataTaskWriterFactory.java       |  105 +-
 .../iceberg/flink/sink/TaskWriterFactory.java      |    3 +-
 .../flink/sink/UnpartitionedDeltaWriter.java       |   33 +-
 .../apache/iceberg/flink/source/DataIterator.java  |   13 +-
 .../iceberg/flink/source/FileScanTaskReader.java   |    1 -
 .../iceberg/flink/source/FlinkInputFormat.java     |   27 +-
 .../iceberg/flink/source/FlinkInputSplit.java      |    1 -
 .../apache/iceberg/flink/source/FlinkSource.java   |   65 +-
 .../iceberg/flink/source/FlinkSplitGenerator.java  |   30 +-
 .../flink/source/RowDataFileScanTaskReader.java    |  105 +-
 .../iceberg/flink/source/RowDataRewriter.java      |   52 +-
 .../apache/iceberg/flink/source/ScanContext.java   |   52 +-
 .../flink/source/StreamingMonitorFunction.java     |   48 +-
 .../flink/source/StreamingReaderOperator.java      |   72 +-
 .../iceberg/flink/util/FlinkCompatibilityUtil.java |    9 +-
 .../apache/iceberg/flink/FlinkCatalogTestBase.java |   24 +-
 .../org/apache/iceberg/flink/FlinkTestBase.java    |   25 +-
 .../apache/iceberg/flink/MiniClusterResource.java  |   22 +-
 .../org/apache/iceberg/flink/RowDataConverter.java |   14 +-
 .../org/apache/iceberg/flink/SimpleDataUtil.java   |  110 +-
 .../iceberg/flink/TestCatalogTableLoader.java      |   21 +-
 .../apache/iceberg/flink/TestChangeLogTable.java   |  248 +-
 .../iceberg/flink/TestDataFileSerialization.java   |  106 +-
 .../org/apache/iceberg/flink/TestFixtures.java     |   24 +-
 .../iceberg/flink/TestFlinkCatalogDatabase.java    |  102 +-
 .../iceberg/flink/TestFlinkCatalogFactory.java     |   44 +-
 .../iceberg/flink/TestFlinkCatalogTable.java       |  186 +-
 .../flink/TestFlinkCatalogTablePartitions.java     |   33 +-
 .../org/apache/iceberg/flink/TestFlinkFilters.java |  288 ++-
 .../apache/iceberg/flink/TestFlinkHiveCatalog.java |   17 +-
 .../apache/iceberg/flink/TestFlinkSchemaUtil.java  |  504 ++--
 .../apache/iceberg/flink/TestFlinkTableSink.java   |  208 +-
 .../apache/iceberg/flink/TestFlinkTableSource.java |  330 +--
 .../org/apache/iceberg/flink/TestFlinkUpsert.java  |  141 +-
 .../java/org/apache/iceberg/flink/TestHelpers.java |  202 +-
 .../apache/iceberg/flink/TestIcebergConnector.java |  205 +-
 .../flink/TestManifestFileSerialization.java       |  102 +-
 .../apache/iceberg/flink/TestRowDataWrapper.java   |   48 +-
 .../org/apache/iceberg/flink/TestTableLoader.java  |    9 +-
 .../iceberg/flink/TestTableSerialization.java      |   51 +-
 .../flink/actions/TestRewriteDataFilesAction.java  |  202 +-
 .../apache/iceberg/flink/data/RandomRowData.java   |    4 +-
 .../flink/data/TestFlinkAvroReaderWriter.java      |   43 +-
 .../flink/data/TestFlinkOrcReaderWriter.java       |   43 +-
 .../iceberg/flink/data/TestFlinkParquetReader.java |   26 +-
 .../iceberg/flink/data/TestFlinkParquetWriter.java |   38 +-
 .../iceberg/flink/data/TestRowDataProjection.java  |  316 +--
 .../iceberg/flink/data/TestRowProjection.java      |  402 +--
 .../iceberg/flink/sink/TestDeltaTaskWriter.java    |  103 +-
 .../flink/sink/TestFlinkAppenderFactory.java       |   16 +-
 .../flink/sink/TestFlinkFileWriterFactory.java     |    9 +-
 .../iceberg/flink/sink/TestFlinkIcebergSink.java   |  168 +-
 .../iceberg/flink/sink/TestFlinkIcebergSinkV2.java |  498 ++--
 .../iceberg/flink/sink/TestFlinkManifest.java      |  140 +-
 .../flink/sink/TestFlinkPartitioningWriters.java   |    9 +-
 .../flink/sink/TestFlinkPositionDeltaWriters.java  |    9 +-
 .../flink/sink/TestFlinkRollingFileWriters.java    |    9 +-
 .../iceberg/flink/sink/TestFlinkWriterMetrics.java |    1 -
 .../flink/sink/TestIcebergFilesCommitter.java      |  176 +-
 .../flink/sink/TestIcebergStreamWriter.java        |  124 +-
 .../flink/sink/TestRowDataPartitionKey.java        |  149 +-
 .../apache/iceberg/flink/sink/TestTaskWriters.java |   40 +-
 .../iceberg/flink/source/BoundedTableFactory.java  |   21 +-
 .../iceberg/flink/source/BoundedTestSource.java    |   39 +-
 .../flink/source/ChangeLogTableTestBase.java       |   26 +-
 .../flink/source/TestBoundedTableFactory.java      |   62 +-
 .../iceberg/flink/source/TestFlinkInputFormat.java |   77 +-
 .../source/TestFlinkInputFormatReaderDeletes.java  |   32 +-
 .../flink/source/TestFlinkMergingMetrics.java      |    4 +-
 .../flink/source/TestFlinkReaderDeletesBase.java   |   16 +-
 .../apache/iceberg/flink/source/TestFlinkScan.java |  158 +-
 .../iceberg/flink/source/TestFlinkScanSql.java     |  134 +-
 .../iceberg/flink/source/TestFlinkSource.java      |   21 +-
 .../flink/source/TestProjectMetaColumn.java        |  111 +-
 .../iceberg/flink/source/TestStreamScanSql.java    |   54 +-
 .../flink/source/TestStreamingMonitorFunction.java |  103 +-
 .../flink/source/TestStreamingReaderOperator.java  |   62 +-
 .../org/apache/iceberg/flink/CatalogLoader.java    |   53 +-
 .../org/apache/iceberg/flink/FlinkCatalog.java     |  224 +-
 .../apache/iceberg/flink/FlinkCatalogFactory.java  |   61 +-
 .../org/apache/iceberg/flink/FlinkConfParser.java  |    1 -
 .../apache/iceberg/flink/FlinkConfigOptions.java   |   32 +-
 .../iceberg/flink/FlinkDynamicTableFactory.java    |   51 +-
 .../org/apache/iceberg/flink/FlinkFilters.java     |  162 +-
 .../org/apache/iceberg/flink/FlinkFixupTypes.java  |    5 +-
 .../org/apache/iceberg/flink/FlinkSchemaUtil.java  |   57 +-
 .../org/apache/iceberg/flink/FlinkTypeToType.java  |    8 +-
 .../org/apache/iceberg/flink/FlinkTypeVisitor.java |    1 -
 .../org/apache/iceberg/flink/FlinkWriteConf.java   |   67 +-
 .../apache/iceberg/flink/FlinkWriteOptions.java    |   24 +-
 .../org/apache/iceberg/flink/IcebergTableSink.java |   31 +-
 .../apache/iceberg/flink/IcebergTableSource.java   |   41 +-
 .../org/apache/iceberg/flink/RowDataWrapper.java   |   16 +-
 .../java/org/apache/iceberg/flink/TableLoader.java |   14 +-
 .../org/apache/iceberg/flink/TypeToFlinkType.java  |   14 +-
 .../org/apache/iceberg/flink/actions/Actions.java  |    9 +-
 .../flink/actions/RewriteDataFilesAction.java      |    4 +-
 .../flink/data/AvroWithFlinkSchemaVisitor.java     |   10 +-
 .../apache/iceberg/flink/data/FlinkAvroReader.java |   20 +-
 .../apache/iceberg/flink/data/FlinkAvroWriter.java |   35 +-
 .../apache/iceberg/flink/data/FlinkOrcReader.java  |   28 +-
 .../apache/iceberg/flink/data/FlinkOrcReaders.java |   51 +-
 .../apache/iceberg/flink/data/FlinkOrcWriter.java  |   48 +-
 .../apache/iceberg/flink/data/FlinkOrcWriters.java |   68 +-
 .../iceberg/flink/data/FlinkParquetReaders.java    |  126 +-
 .../iceberg/flink/data/FlinkParquetWriters.java    |  142 +-
 .../iceberg/flink/data/FlinkSchemaVisitor.java     |   15 +-
 .../iceberg/flink/data/FlinkValueReaders.java      |   23 +-
 .../iceberg/flink/data/FlinkValueWriters.java      |   44 +-
 .../flink/data/ParquetWithFlinkSchemaVisitor.java  |   83 +-
 .../iceberg/flink/data/RowDataProjection.java      |   78 +-
 .../org/apache/iceberg/flink/data/RowDataUtil.java |   17 +-
 .../iceberg/flink/sink/BaseDeltaTaskWriter.java    |   28 +-
 .../apache/iceberg/flink/sink/DeltaManifests.java  |    4 +-
 .../flink/sink/DeltaManifestsSerializer.java       |    4 +-
 .../flink/sink/EqualityFieldKeySelector.java       |   17 +-
 .../iceberg/flink/sink/FlinkAppenderFactory.java   |   64 +-
 .../iceberg/flink/sink/FlinkFileWriterFactory.java |  103 +-
 .../iceberg/flink/sink/FlinkManifestUtil.java      |   48 +-
 .../org/apache/iceberg/flink/sink/FlinkSink.java   |  329 +--
 .../iceberg/flink/sink/IcebergFilesCommitter.java  |  211 +-
 .../iceberg/flink/sink/IcebergStreamWriter.java    |    4 +-
 .../flink/sink/ManifestOutputFileFactory.java      |   28 +-
 .../iceberg/flink/sink/PartitionKeySelector.java   |   10 +-
 .../iceberg/flink/sink/PartitionedDeltaWriter.java |   36 +-
 .../flink/sink/RowDataTaskWriterFactory.java       |  105 +-
 .../iceberg/flink/sink/TaskWriterFactory.java      |    3 +-
 .../flink/sink/UnpartitionedDeltaWriter.java       |   33 +-
 .../apache/iceberg/flink/source/DataIterator.java  |   37 +-
 .../iceberg/flink/source/FileScanTaskReader.java   |    1 -
 .../iceberg/flink/source/FlinkInputFormat.java     |   30 +-
 .../iceberg/flink/source/FlinkInputSplit.java      |    1 -
 .../apache/iceberg/flink/source/FlinkSource.java   |   65 +-
 .../iceberg/flink/source/FlinkSplitPlanner.java    |   61 +-
 .../apache/iceberg/flink/source/IcebergSource.java |   44 +-
 .../flink/source/RowDataFileScanTaskReader.java    |  105 +-
 .../iceberg/flink/source/RowDataRewriter.java      |   52 +-
 .../apache/iceberg/flink/source/ScanContext.java   |   79 +-
 .../flink/source/StreamingMonitorFunction.java     |   58 +-
 .../flink/source/StreamingReaderOperator.java      |   72 +-
 .../flink/source/StreamingStartingStrategy.java    |   26 +-
 .../flink/source/assigner/GetSplitResult.java      |   10 +-
 .../flink/source/assigner/SimpleSplitAssigner.java |    9 +-
 .../assigner/SimpleSplitAssignerFactory.java       |    5 +-
 .../flink/source/assigner/SplitAssigner.java       |   85 +-
 .../source/assigner/SplitAssignerFactory.java      |    1 -
 .../enumerator/AbstractIcebergEnumerator.java      |   62 +-
 .../enumerator/ContinuousEnumerationResult.java    |    1 -
 .../enumerator/ContinuousIcebergEnumerator.java    |   41 +-
 .../source/enumerator/ContinuousSplitPlanner.java  |    9 +-
 .../enumerator/ContinuousSplitPlannerImpl.java     |  115 +-
 .../enumerator/IcebergEnumeratorPosition.java      |    9 +-
 .../IcebergEnumeratorPositionSerializer.java       |    7 +-
 .../source/enumerator/IcebergEnumeratorState.java  |    8 +-
 .../IcebergEnumeratorStateSerializer.java          |   16 +-
 .../source/enumerator/StaticIcebergEnumerator.java |   29 +-
 .../flink/source/reader/ArrayBatchRecords.java     |   78 +-
 .../reader/ArrayPoolDataIteratorBatcher.java       |   17 +-
 .../flink/source/reader/DataIteratorBatcher.java   |    9 +-
 .../source/reader/DataIteratorReaderFunction.java  |    9 +-
 .../flink/source/reader/IcebergSourceReader.java   |   10 +-
 .../source/reader/IcebergSourceRecordEmitter.java  |   11 +-
 .../source/reader/IcebergSourceSplitReader.java    |   38 +-
 .../flink/source/reader/ReaderFunction.java        |    8 +-
 .../flink/source/reader/ReaderMetricsContext.java  |    6 +-
 .../flink/source/reader/RecordAndPosition.java     |    5 +-
 .../iceberg/flink/source/reader/RecordFactory.java |   16 +-
 .../flink/source/reader/RowDataReaderFunction.java |   28 +-
 .../flink/source/reader/RowDataRecordFactory.java  |    1 -
 .../flink/source/split/IcebergSourceSplit.java     |   27 +-
 .../source/split/IcebergSourceSplitSerializer.java |   13 +-
 .../source/split/IcebergSourceSplitState.java      |    1 -
 .../source/split/IcebergSourceSplitStatus.java     |    1 -
 .../flink/source/split/SplitRequestEvent.java      |    5 +-
 .../iceberg/flink/util/FlinkCompatibilityUtil.java |    9 +-
 .../apache/iceberg/flink/FlinkCatalogTestBase.java |   24 +-
 .../org/apache/iceberg/flink/FlinkTestBase.java    |   23 +-
 .../apache/iceberg/flink/HadoopTableResource.java  |   15 +-
 .../apache/iceberg/flink/MiniClusterResource.java  |   22 +-
 .../org/apache/iceberg/flink/RowDataConverter.java |   14 +-
 .../org/apache/iceberg/flink/SimpleDataUtil.java   |  125 +-
 .../iceberg/flink/TestCatalogTableLoader.java      |   21 +-
 .../apache/iceberg/flink/TestChangeLogTable.java   |  248 +-
 .../iceberg/flink/TestDataFileSerialization.java   |  106 +-
 .../org/apache/iceberg/flink/TestFixtures.java     |   32 +-
 .../iceberg/flink/TestFlinkCatalogDatabase.java    |  102 +-
 .../iceberg/flink/TestFlinkCatalogFactory.java     |   44 +-
 .../iceberg/flink/TestFlinkCatalogTable.java       |  192 +-
 .../flink/TestFlinkCatalogTablePartitions.java     |   33 +-
 .../org/apache/iceberg/flink/TestFlinkFilters.java |  288 ++-
 .../apache/iceberg/flink/TestFlinkHiveCatalog.java |   17 +-
 .../apache/iceberg/flink/TestFlinkSchemaUtil.java  |  504 ++--
 .../apache/iceberg/flink/TestFlinkTableSink.java   |  203 +-
 .../apache/iceberg/flink/TestFlinkTableSource.java |  330 +--
 .../org/apache/iceberg/flink/TestFlinkUpsert.java  |  141 +-
 .../java/org/apache/iceberg/flink/TestHelpers.java |  202 +-
 .../apache/iceberg/flink/TestIcebergConnector.java |  200 +-
 .../flink/TestManifestFileSerialization.java       |  102 +-
 .../apache/iceberg/flink/TestRowDataWrapper.java   |   48 +-
 .../org/apache/iceberg/flink/TestTableLoader.java  |    9 +-
 .../iceberg/flink/TestTableSerialization.java      |   49 +-
 .../flink/actions/TestRewriteDataFilesAction.java  |  202 +-
 .../apache/iceberg/flink/data/RandomRowData.java   |    4 +-
 .../iceberg/flink/data/RowDataToRowMapper.java     |    5 +-
 .../flink/data/TestFlinkAvroReaderWriter.java      |  109 +-
 .../flink/data/TestFlinkOrcReaderWriter.java       |   43 +-
 .../iceberg/flink/data/TestFlinkParquetReader.java |   60 +-
 .../iceberg/flink/data/TestFlinkParquetWriter.java |   38 +-
 .../iceberg/flink/data/TestRowDataProjection.java  |  316 +--
 .../iceberg/flink/data/TestRowProjection.java      |  402 +--
 .../iceberg/flink/sink/TestDeltaTaskWriter.java    |  101 +-
 .../flink/sink/TestFlinkAppenderFactory.java       |   16 +-
 .../flink/sink/TestFlinkFileWriterFactory.java     |    9 +-
 .../iceberg/flink/sink/TestFlinkIcebergSink.java   |  172 +-
 .../iceberg/flink/sink/TestFlinkIcebergSinkV2.java |  498 ++--
 .../iceberg/flink/sink/TestFlinkManifest.java      |  148 +-
 .../flink/sink/TestFlinkPartitioningWriters.java   |    9 +-
 .../flink/sink/TestFlinkPositionDeltaWriters.java  |    9 +-
 .../flink/sink/TestFlinkRollingFileWriters.java    |    9 +-
 .../iceberg/flink/sink/TestFlinkWriterMetrics.java |    1 -
 .../flink/sink/TestIcebergFilesCommitter.java      |  182 +-
 .../flink/sink/TestIcebergStreamWriter.java        |  124 +-
 .../flink/sink/TestRowDataPartitionKey.java        |  149 +-
 .../apache/iceberg/flink/sink/TestTaskWriters.java |   40 +-
 .../iceberg/flink/source/BoundedTableFactory.java  |   21 +-
 .../iceberg/flink/source/BoundedTestSource.java    |   39 +-
 .../flink/source/ChangeLogTableTestBase.java       |   25 +-
 .../apache/iceberg/flink/source/SplitHelpers.java  |   37 +-
 .../flink/source/TestBoundedTableFactory.java      |   62 +-
 .../iceberg/flink/source/TestFlinkInputFormat.java |   77 +-
 .../source/TestFlinkInputFormatReaderDeletes.java  |   32 +-
 .../flink/source/TestFlinkMergingMetrics.java      |    4 +-
 .../flink/source/TestFlinkReaderDeletesBase.java   |   16 +-
 .../apache/iceberg/flink/source/TestFlinkScan.java |  158 +-
 .../iceberg/flink/source/TestFlinkScanSql.java     |  132 +-
 .../iceberg/flink/source/TestFlinkSource.java      |   21 +-
 .../flink/source/TestIcebergSourceBounded.java     |   33 +-
 .../flink/source/TestIcebergSourceContinuous.java  |  218 +-
 .../flink/source/TestIcebergSourceFailover.java    |   65 +-
 .../source/TestIcebergSourceReaderDeletes.java     |   57 +-
 .../flink/source/TestProjectMetaColumn.java        |  111 +-
 .../iceberg/flink/source/TestStreamScanSql.java    |   53 +-
 .../flink/source/TestStreamingMonitorFunction.java |  103 +-
 .../flink/source/TestStreamingReaderOperator.java  |   67 +-
 .../source/assigner/TestSimpleSplitAssigner.java   |   32 +-
 .../enumerator/ManualContinuousSplitPlanner.java   |   16 +-
 .../TestContinuousIcebergEnumerator.java           |   80 +-
 .../enumerator/TestContinuousSplitPlannerImpl.java |  327 +--
 ...estContinuousSplitPlannerImplStartStrategy.java |  136 +-
 .../TestIcebergEnumeratorStateSerializer.java      |   38 +-
 .../source/reader/ReaderFunctionTestBase.java      |   49 +-
 .../iceberg/flink/source/reader/ReaderUtil.java    |   40 +-
 .../flink/source/reader/TestArrayBatchRecords.java |   19 +-
 .../TestArrayPoolDataIteratorBatcherRowData.java   |   41 +-
 .../source/reader/TestRowDataReaderFunction.java   |   18 +-
 .../split/TestIcebergSourceSplitSerializer.java    |   52 +-
 .../org/apache/iceberg/flink/CatalogLoader.java    |   53 +-
 .../org/apache/iceberg/flink/FlinkCatalog.java     |  224 +-
 .../apache/iceberg/flink/FlinkCatalogFactory.java  |   61 +-
 .../org/apache/iceberg/flink/FlinkConfParser.java  |    1 -
 .../apache/iceberg/flink/FlinkConfigOptions.java   |   32 +-
 .../iceberg/flink/FlinkDynamicTableFactory.java    |   51 +-
 .../org/apache/iceberg/flink/FlinkFilters.java     |  162 +-
 .../org/apache/iceberg/flink/FlinkFixupTypes.java  |    5 +-
 .../org/apache/iceberg/flink/FlinkSchemaUtil.java  |   57 +-
 .../org/apache/iceberg/flink/FlinkTypeToType.java  |    8 +-
 .../org/apache/iceberg/flink/FlinkTypeVisitor.java |    1 -
 .../org/apache/iceberg/flink/FlinkWriteConf.java   |   67 +-
 .../apache/iceberg/flink/FlinkWriteOptions.java    |   24 +-
 .../org/apache/iceberg/flink/IcebergTableSink.java |   31 +-
 .../apache/iceberg/flink/IcebergTableSource.java   |   41 +-
 .../org/apache/iceberg/flink/RowDataWrapper.java   |   16 +-
 .../java/org/apache/iceberg/flink/TableLoader.java |   14 +-
 .../org/apache/iceberg/flink/TypeToFlinkType.java  |   14 +-
 .../org/apache/iceberg/flink/actions/Actions.java  |    9 +-
 .../flink/actions/RewriteDataFilesAction.java      |    4 +-
 .../flink/data/AvroWithFlinkSchemaVisitor.java     |   10 +-
 .../apache/iceberg/flink/data/FlinkAvroReader.java |   20 +-
 .../apache/iceberg/flink/data/FlinkAvroWriter.java |   35 +-
 .../apache/iceberg/flink/data/FlinkOrcReader.java  |   28 +-
 .../apache/iceberg/flink/data/FlinkOrcReaders.java |   51 +-
 .../apache/iceberg/flink/data/FlinkOrcWriter.java  |   48 +-
 .../apache/iceberg/flink/data/FlinkOrcWriters.java |   68 +-
 .../iceberg/flink/data/FlinkParquetReaders.java    |  126 +-
 .../iceberg/flink/data/FlinkParquetWriters.java    |  142 +-
 .../iceberg/flink/data/FlinkSchemaVisitor.java     |   15 +-
 .../iceberg/flink/data/FlinkValueReaders.java      |   23 +-
 .../iceberg/flink/data/FlinkValueWriters.java      |   44 +-
 .../flink/data/ParquetWithFlinkSchemaVisitor.java  |   89 +-
 .../iceberg/flink/data/RowDataProjection.java      |   78 +-
 .../org/apache/iceberg/flink/data/RowDataUtil.java |   17 +-
 .../iceberg/flink/sink/BaseDeltaTaskWriter.java    |   28 +-
 .../apache/iceberg/flink/sink/DeltaManifests.java  |    4 +-
 .../flink/sink/DeltaManifestsSerializer.java       |    4 +-
 .../flink/sink/EqualityFieldKeySelector.java       |   17 +-
 .../iceberg/flink/sink/FlinkAppenderFactory.java   |   64 +-
 .../iceberg/flink/sink/FlinkFileWriterFactory.java |  103 +-
 .../iceberg/flink/sink/FlinkManifestUtil.java      |   48 +-
 .../org/apache/iceberg/flink/sink/FlinkSink.java   |  329 +--
 .../iceberg/flink/sink/IcebergFilesCommitter.java  |  211 +-
 .../iceberg/flink/sink/IcebergStreamWriter.java    |    4 +-
 .../flink/sink/ManifestOutputFileFactory.java      |   28 +-
 .../iceberg/flink/sink/PartitionKeySelector.java   |   10 +-
 .../iceberg/flink/sink/PartitionedDeltaWriter.java |   36 +-
 .../flink/sink/RowDataTaskWriterFactory.java       |  105 +-
 .../iceberg/flink/sink/TaskWriterFactory.java      |    3 +-
 .../flink/sink/UnpartitionedDeltaWriter.java       |   33 +-
 .../apache/iceberg/flink/source/DataIterator.java  |   37 +-
 .../iceberg/flink/source/FileScanTaskReader.java   |    1 -
 .../iceberg/flink/source/FlinkInputFormat.java     |   30 +-
 .../iceberg/flink/source/FlinkInputSplit.java      |    1 -
 .../apache/iceberg/flink/source/FlinkSource.java   |   65 +-
 .../iceberg/flink/source/FlinkSplitPlanner.java    |   61 +-
 .../apache/iceberg/flink/source/IcebergSource.java |   44 +-
 .../flink/source/RowDataFileScanTaskReader.java    |  105 +-
 .../iceberg/flink/source/RowDataRewriter.java      |   52 +-
 .../apache/iceberg/flink/source/ScanContext.java   |   82 +-
 .../flink/source/StreamingMonitorFunction.java     |   90 +-
 .../flink/source/StreamingReaderOperator.java      |   72 +-
 .../flink/source/StreamingStartingStrategy.java    |   26 +-
 .../flink/source/assigner/GetSplitResult.java      |   10 +-
 .../flink/source/assigner/SimpleSplitAssigner.java |    9 +-
 .../assigner/SimpleSplitAssignerFactory.java       |    5 +-
 .../flink/source/assigner/SplitAssigner.java       |   85 +-
 .../source/assigner/SplitAssignerFactory.java      |    1 -
 .../enumerator/AbstractIcebergEnumerator.java      |   62 +-
 .../enumerator/ContinuousEnumerationResult.java    |    1 -
 .../enumerator/ContinuousIcebergEnumerator.java    |   41 +-
 .../source/enumerator/ContinuousSplitPlanner.java  |    9 +-
 .../enumerator/ContinuousSplitPlannerImpl.java     |  115 +-
 .../enumerator/IcebergEnumeratorPosition.java      |    9 +-
 .../IcebergEnumeratorPositionSerializer.java       |    7 +-
 .../source/enumerator/IcebergEnumeratorState.java  |    8 +-
 .../IcebergEnumeratorStateSerializer.java          |   16 +-
 .../source/enumerator/StaticIcebergEnumerator.java |   29 +-
 .../flink/source/reader/ArrayBatchRecords.java     |   78 +-
 .../reader/ArrayPoolDataIteratorBatcher.java       |   17 +-
 .../flink/source/reader/DataIteratorBatcher.java   |    9 +-
 .../source/reader/DataIteratorReaderFunction.java  |    9 +-
 .../flink/source/reader/IcebergSourceReader.java   |   10 +-
 .../source/reader/IcebergSourceRecordEmitter.java  |   11 +-
 .../source/reader/IcebergSourceSplitReader.java    |   38 +-
 .../flink/source/reader/ReaderFunction.java        |    8 +-
 .../flink/source/reader/ReaderMetricsContext.java  |    6 +-
 .../flink/source/reader/RecordAndPosition.java     |    5 +-
 .../iceberg/flink/source/reader/RecordFactory.java |   16 +-
 .../flink/source/reader/RowDataReaderFunction.java |   28 +-
 .../flink/source/reader/RowDataRecordFactory.java  |    1 -
 .../flink/source/split/IcebergSourceSplit.java     |   27 +-
 .../source/split/IcebergSourceSplitSerializer.java |   13 +-
 .../source/split/IcebergSourceSplitState.java      |    1 -
 .../source/split/IcebergSourceSplitStatus.java     |    1 -
 .../flink/source/split/SplitRequestEvent.java      |    5 +-
 .../iceberg/flink/util/FlinkCompatibilityUtil.java |    9 +-
 .../apache/iceberg/flink/FlinkCatalogTestBase.java |   24 +-
 .../org/apache/iceberg/flink/FlinkTestBase.java    |   23 +-
 .../apache/iceberg/flink/HadoopTableResource.java  |   15 +-
 .../apache/iceberg/flink/MiniClusterResource.java  |   22 +-
 .../org/apache/iceberg/flink/RowDataConverter.java |   14 +-
 .../org/apache/iceberg/flink/SimpleDataUtil.java   |  126 +-
 .../iceberg/flink/TestCatalogTableLoader.java      |   21 +-
 .../apache/iceberg/flink/TestChangeLogTable.java   |  251 +-
 .../iceberg/flink/TestDataFileSerialization.java   |  106 +-
 .../org/apache/iceberg/flink/TestFixtures.java     |   36 +-
 .../iceberg/flink/TestFlinkCatalogDatabase.java    |  102 +-
 .../iceberg/flink/TestFlinkCatalogFactory.java     |   44 +-
 .../iceberg/flink/TestFlinkCatalogTable.java       |  192 +-
 .../flink/TestFlinkCatalogTablePartitions.java     |   33 +-
 .../org/apache/iceberg/flink/TestFlinkFilters.java |  288 ++-
 .../apache/iceberg/flink/TestFlinkHiveCatalog.java |   17 +-
 .../apache/iceberg/flink/TestFlinkSchemaUtil.java  |  504 ++--
 .../apache/iceberg/flink/TestFlinkTableSink.java   |  203 +-
 .../apache/iceberg/flink/TestFlinkTableSource.java |  330 +--
 .../org/apache/iceberg/flink/TestFlinkUpsert.java  |  141 +-
 .../java/org/apache/iceberg/flink/TestHelpers.java |  202 +-
 .../apache/iceberg/flink/TestIcebergConnector.java |  200 +-
 .../flink/TestManifestFileSerialization.java       |  102 +-
 .../apache/iceberg/flink/TestRowDataWrapper.java   |   48 +-
 .../org/apache/iceberg/flink/TestTableLoader.java  |    9 +-
 .../iceberg/flink/TestTableSerialization.java      |   49 +-
 .../flink/actions/TestRewriteDataFilesAction.java  |  202 +-
 .../apache/iceberg/flink/data/RandomRowData.java   |    4 +-
 .../iceberg/flink/data/RowDataToRowMapper.java     |    5 +-
 .../flink/data/TestFlinkAvroReaderWriter.java      |  109 +-
 .../flink/data/TestFlinkOrcReaderWriter.java       |   43 +-
 .../iceberg/flink/data/TestFlinkParquetReader.java |   60 +-
 .../iceberg/flink/data/TestFlinkParquetWriter.java |   38 +-
 .../iceberg/flink/data/TestRowDataProjection.java  |  316 +--
 .../iceberg/flink/data/TestRowProjection.java      |  402 +--
 .../iceberg/flink/sink/TestDeltaTaskWriter.java    |  101 +-
 .../flink/sink/TestFlinkAppenderFactory.java       |   16 +-
 .../flink/sink/TestFlinkFileWriterFactory.java     |    9 +-
 .../iceberg/flink/sink/TestFlinkIcebergSink.java   |  172 +-
 .../iceberg/flink/sink/TestFlinkIcebergSinkV2.java |  498 ++--
 .../iceberg/flink/sink/TestFlinkManifest.java      |  148 +-
 .../flink/sink/TestFlinkPartitioningWriters.java   |    9 +-
 .../flink/sink/TestFlinkPositionDeltaWriters.java  |    9 +-
 .../flink/sink/TestFlinkRollingFileWriters.java    |    9 +-
 .../iceberg/flink/sink/TestFlinkWriterMetrics.java |    1 -
 .../flink/sink/TestIcebergFilesCommitter.java      |  176 +-
 .../flink/sink/TestIcebergStreamWriter.java        |  124 +-
 .../flink/sink/TestRowDataPartitionKey.java        |  149 +-
 .../apache/iceberg/flink/sink/TestTaskWriters.java |   40 +-
 .../iceberg/flink/source/BoundedTableFactory.java  |   21 +-
 .../iceberg/flink/source/BoundedTestSource.java    |   39 +-
 .../flink/source/ChangeLogTableTestBase.java       |   25 +-
 .../apache/iceberg/flink/source/SplitHelpers.java  |   37 +-
 .../flink/source/TestBoundedTableFactory.java      |   62 +-
 .../iceberg/flink/source/TestFlinkInputFormat.java |   77 +-
 .../source/TestFlinkInputFormatReaderDeletes.java  |   32 +-
 .../flink/source/TestFlinkMergingMetrics.java      |    4 +-
 .../flink/source/TestFlinkReaderDeletesBase.java   |   16 +-
 .../apache/iceberg/flink/source/TestFlinkScan.java |  158 +-
 .../iceberg/flink/source/TestFlinkScanSql.java     |  132 +-
 .../iceberg/flink/source/TestFlinkSource.java      |   21 +-
 .../flink/source/TestIcebergSourceBounded.java     |   33 +-
 .../flink/source/TestIcebergSourceContinuous.java  |  218 +-
 .../flink/source/TestIcebergSourceFailover.java    |   65 +-
 .../source/TestIcebergSourceReaderDeletes.java     |   57 +-
 .../flink/source/TestProjectMetaColumn.java        |  111 +-
 .../iceberg/flink/source/TestStreamScanSql.java    |   53 +-
 .../flink/source/TestStreamingMonitorFunction.java |  184 +-
 .../flink/source/TestStreamingReaderOperator.java  |   67 +-
 .../source/assigner/TestSimpleSplitAssigner.java   |   32 +-
 .../enumerator/ManualContinuousSplitPlanner.java   |   16 +-
 .../TestContinuousIcebergEnumerator.java           |   80 +-
 .../enumerator/TestContinuousSplitPlannerImpl.java |  327 +--
 ...estContinuousSplitPlannerImplStartStrategy.java |  136 +-
 .../TestIcebergEnumeratorStateSerializer.java      |   38 +-
 .../source/reader/ReaderFunctionTestBase.java      |   49 +-
 .../iceberg/flink/source/reader/ReaderUtil.java    |   40 +-
 .../flink/source/reader/TestArrayBatchRecords.java |   19 +-
 .../TestArrayPoolDataIteratorBatcherRowData.java   |   41 +-
 .../source/reader/TestRowDataReaderFunction.java   |   18 +-
 .../split/TestIcebergSourceSplitSerializer.java    |   52 +-
 .../java/org/apache/iceberg/gcp/GCPProperties.java |    4 +-
 .../org/apache/iceberg/gcp/gcs/BaseGCSFile.java    |    1 -
 .../java/org/apache/iceberg/gcp/gcs/GCSFileIO.java |   77 +-
 .../org/apache/iceberg/gcp/gcs/GCSInputFile.java   |   26 +-
 .../org/apache/iceberg/gcp/gcs/GCSInputStream.java |   24 +-
 .../org/apache/iceberg/gcp/gcs/GCSOutputFile.java  |   12 +-
 .../apache/iceberg/gcp/gcs/GCSOutputStream.java    |   32 +-
 .../org/apache/iceberg/gcp/gcs/GCSFileIOTest.java  |   21 +-
 .../apache/iceberg/gcp/gcs/GCSInputStreamTest.java |   32 +-
 .../iceberg/gcp/gcs/GCSOutputStreamTest.java       |   30 +-
 .../org/apache/iceberg/hive/CachedClientPool.java  |   16 +-
 .../java/org/apache/iceberg/hive/HiveCatalog.java  |  205 +-
 .../org/apache/iceberg/hive/HiveClientPool.java    |   41 +-
 .../apache/iceberg/hive/HiveSchemaConverter.java   |   43 +-
 .../org/apache/iceberg/hive/HiveSchemaUtil.java    |   46 +-
 .../apache/iceberg/hive/HiveTableOperations.java   |  342 ++-
 .../org/apache/iceberg/hive/MetastoreUtil.java     |   50 +-
 .../apache/iceberg/hive/RuntimeMetaException.java  |    5 +-
 .../iceberg/hive/HiveCreateReplaceTableTest.java   |  143 +-
 .../org/apache/iceberg/hive/HiveMetastoreTest.java |   13 +-
 .../org/apache/iceberg/hive/HiveTableBaseTest.java |   35 +-
 .../org/apache/iceberg/hive/HiveTableTest.java     |  361 +--
 .../java/org/apache/iceberg/hive/ScriptRunner.java |   73 +-
 .../apache/iceberg/hive/TestCachedClientPool.java  |    2 -
 .../org/apache/iceberg/hive/TestHiveCatalog.java   |  440 ++--
 .../apache/iceberg/hive/TestHiveClientPool.java    |   58 +-
 .../apache/iceberg/hive/TestHiveCommitLocks.java   |  109 +-
 .../org/apache/iceberg/hive/TestHiveCommits.java   |  216 +-
 .../org/apache/iceberg/hive/TestHiveMetastore.java |  119 +-
 .../apache/iceberg/hive/TestHiveSchemaUtil.java    |  193 +-
 .../iceberg/hive/TestHiveTableConcurrency.java     |   76 +-
 .../org/apache/hadoop/hive/ql/io/orc/OrcSplit.java |   97 +-
 .../IcebergDateObjectInspectorHive3.java           |    7 +-
 .../IcebergTimestampObjectInspectorHive3.java      |    8 +-
 ...ebergTimestampWithZoneObjectInspectorHive3.java |    4 +-
 .../hive/vector/CompatibilityHiveVectorUtils.java  |   41 +-
 .../vector/HiveIcebergVectorizedRecordReader.java  |   16 +-
 .../mr/hive/vector/HiveVectorizedReader.java       |   79 +-
 .../hive/vector/ParquetSchemaFieldNameVisitor.java |   31 +-
 .../mr/hive/vector/VectorizedRowBatchIterator.java |   28 +-
 .../apache/iceberg/orc/VectorizedReadUtils.java    |   14 +-
 .../iceberg/mr/hive/TestHiveSchemaUtilHive3.java   |    5 +-
 .../TestIcebergDateObjectInspectorHive3.java       |    2 -
 .../TestIcebergTimestampObjectInspectorHive3.java  |    9 +-
 ...ebergTimestampWithZoneObjectInspectorHive3.java |   17 +-
 .../hive/ql/exec/vector/VectorizedSupport.java     |    7 +-
 .../main/java/org/apache/iceberg/mr/Catalogs.java  |  157 +-
 .../org/apache/iceberg/mr/InputFormatConfig.java   |   58 +-
 .../org/apache/iceberg/mr/hive/Deserializer.java   |   74 +-
 .../iceberg/mr/hive/HiveIcebergFilterFactory.java  |   77 +-
 .../iceberg/mr/hive/HiveIcebergInputFormat.java    |   59 +-
 .../iceberg/mr/hive/HiveIcebergMetaHook.java       |  155 +-
 .../mr/hive/HiveIcebergOutputCommitter.java        |  274 ++-
 .../iceberg/mr/hive/HiveIcebergOutputFormat.java   |   60 +-
 .../iceberg/mr/hive/HiveIcebergRecordWriter.java   |   27 +-
 .../apache/iceberg/mr/hive/HiveIcebergSerDe.java   |   89 +-
 .../apache/iceberg/mr/hive/HiveIcebergSplit.java   |   13 +-
 .../iceberg/mr/hive/HiveIcebergStorageHandler.java |  119 +-
 .../java/org/apache/iceberg/mr/hive/TezUtil.java   |   58 +-
 .../IcebergBinaryObjectInspector.java              |    5 +-
 .../IcebergDateObjectInspector.java                |    4 +-
 .../IcebergDecimalObjectInspector.java             |    6 +-
 .../IcebergFixedObjectInspector.java               |    1 -
 .../objectinspector/IcebergObjectInspector.java    |   66 +-
 .../IcebergRecordObjectInspector.java              |   25 +-
 .../IcebergTimeObjectInspector.java                |    1 -
 .../IcebergTimestampObjectInspector.java           |    4 +-
 .../IcebergTimestampWithZoneObjectInspector.java   |    5 +-
 .../IcebergUUIDObjectInspector.java                |    1 -
 .../objectinspector/WriteObjectInspector.java      |    7 +-
 .../mapred/AbstractMapredIcebergRecordReader.java  |   11 +-
 .../org/apache/iceberg/mr/mapred/Container.java    |    1 -
 .../mr/mapred/MapredIcebergInputFormat.java        |   38 +-
 .../iceberg/mr/mapreduce/IcebergInputFormat.java   |  226 +-
 .../apache/iceberg/mr/mapreduce/IcebergSplit.java  |   12 +-
 .../mr/mapreduce/IcebergSplitContainer.java        |    2 -
 .../java/org/apache/iceberg/mr/TestCatalogs.java   |  137 +-
 .../java/org/apache/iceberg/mr/TestHelper.java     |   19 +-
 .../apache/iceberg/mr/TestIcebergInputFormats.java |  102 +-
 .../iceberg/mr/TestInputFormatReaderDeletes.java   |   32 +-
 .../hive/HiveIcebergStorageHandlerTestUtils.java   |   42 +-
 .../iceberg/mr/hive/HiveIcebergTestUtils.java      |  123 +-
 .../apache/iceberg/mr/hive/TestDeserializer.java   |  152 +-
 .../mr/hive/TestHiveIcebergFilterFactory.java      |  139 +-
 .../mr/hive/TestHiveIcebergOutputCommitter.java    |  117 +-
 .../iceberg/mr/hive/TestHiveIcebergSerDe.java      |   12 +-
 .../TestHiveIcebergStorageHandlerLocalScan.java    |  537 ++--
 .../hive/TestHiveIcebergStorageHandlerNoScan.java  |  823 ++++---
 .../TestHiveIcebergStorageHandlerTimezone.java     |   85 +-
 .../TestHiveIcebergStorageHandlerWithEngine.java   |  938 ++++---
 ...eIcebergStorageHandlerWithMultipleCatalogs.java |   97 +-
 .../org/apache/iceberg/mr/hive/TestHiveShell.java  |   46 +-
 .../org/apache/iceberg/mr/hive/TestTables.java     |  313 ++-
 .../TestIcebergBinaryObjectInspector.java          |    5 +-
 .../TestIcebergDateObjectInspector.java            |    2 -
 .../TestIcebergDecimalObjectInspector.java         |    8 +-
 .../TestIcebergFixedObjectInspector.java           |    7 +-
 .../TestIcebergObjectInspector.java                |  103 +-
 .../TestIcebergRecordObjectInspector.java          |   36 +-
 .../TestIcebergTimeObjectInspector.java            |    4 +-
 .../TestIcebergTimestampObjectInspector.java       |    5 +-
 ...estIcebergTimestampWithZoneObjectInspector.java |   15 +-
 .../TestIcebergUUIDObjectInspector.java            |    4 +-
 .../org/apache/iceberg/nessie/NessieCatalog.java   |  113 +-
 .../apache/iceberg/nessie/NessieIcebergClient.java |  179 +-
 .../iceberg/nessie/NessieTableOperations.java      |  123 +-
 .../java/org/apache/iceberg/nessie/NessieUtil.java |   19 +-
 .../apache/iceberg/nessie/UpdateableReference.java |    8 +-
 .../org/apache/iceberg/nessie/BaseTestIceberg.java |   35 +-
 .../iceberg/nessie/TestBranchVisibility.java       |  177 +-
 .../iceberg/nessie/TestCustomNessieClient.java     |   71 +-
 .../org/apache/iceberg/nessie/TestNamespace.java   |   19 +-
 .../apache/iceberg/nessie/TestNessieCatalog.java   |    1 -
 .../iceberg/nessie/TestNessieIcebergClient.java    |   13 +-
 .../org/apache/iceberg/nessie/TestNessieTable.java |  144 +-
 .../org/apache/iceberg/nessie/TestNessieUtil.java  |   52 +-
 .../apache/iceberg/data/orc/GenericOrcReader.java  |   29 +-
 .../apache/iceberg/data/orc/GenericOrcReaders.java |   61 +-
 .../apache/iceberg/data/orc/GenericOrcWriter.java  |   34 +-
 .../apache/iceberg/data/orc/GenericOrcWriters.java |   84 +-
 .../org/apache/iceberg/orc/ApplyNameMapping.java   |    7 +-
 .../iceberg/orc/EstimateOrcAvgWidthVisitor.java    |    1 -
 .../iceberg/orc/ExpressionToSearchArgument.java    |  131 +-
 .../main/java/org/apache/iceberg/orc/HasIds.java   |    4 +-
 .../java/org/apache/iceberg/orc/IdToOrcName.java   |   18 +-
 orc/src/main/java/org/apache/iceberg/orc/ORC.java  |  231 +-
 .../java/org/apache/iceberg/orc/ORCSchemaUtil.java |  205 +-
 .../org/apache/iceberg/orc/OrcBatchReader.java     |    9 +-
 .../org/apache/iceberg/orc/OrcFileAppender.java    |   52 +-
 .../java/org/apache/iceberg/orc/OrcIterable.java   |   57 +-
 .../java/org/apache/iceberg/orc/OrcMetrics.java    |  212 +-
 .../java/org/apache/iceberg/orc/OrcRowReader.java  |    9 +-
 .../java/org/apache/iceberg/orc/OrcRowWriter.java  |   11 +-
 .../org/apache/iceberg/orc/OrcSchemaVisitor.java   |   12 +-
 .../iceberg/orc/OrcSchemaWithTypeVisitor.java      |   18 +-
 .../apache/iceberg/orc/OrcToIcebergVisitor.java    |   91 +-
 .../org/apache/iceberg/orc/OrcValueReader.java     |    5 +-
 .../org/apache/iceberg/orc/OrcValueReaders.java    |   30 +-
 .../org/apache/iceberg/orc/OrcValueWriter.java     |    9 +-
 .../java/org/apache/iceberg/orc/RemoveIds.java     |    4 +-
 .../iceberg/orc/VectorizedRowBatchIterator.java    |   13 +-
 .../apache/iceberg/orc/TestBuildOrcProjection.java |  112 +-
 .../orc/TestEstimateOrcAvgWidthVisitor.java        |   89 +-
 .../orc/TestExpressionToSearchArgument.java        |  571 +++--
 .../org/apache/iceberg/orc/TestIdToOrcName.java    |   67 +-
 .../org/apache/iceberg/orc/TestORCSchemaUtil.java  |  600 +++--
 .../apache/iceberg/orc/TestOrcDeleteWriters.java   |  127 +-
 .../apache/iceberg/orc/TestTableProperties.java    |   64 +-
 .../iceberg/data/parquet/BaseParquetReaders.java   |  104 +-
 .../iceberg/data/parquet/BaseParquetWriter.java    |   60 +-
 .../data/parquet/GenericParquetReaders.java        |   22 +-
 .../iceberg/data/parquet/GenericParquetWriter.java |    4 +-
 .../apache/iceberg/parquet/ApplyNameMapping.java   |   29 +-
 .../apache/iceberg/parquet/BaseColumnIterator.java |    2 -
 .../apache/iceberg/parquet/BasePageIterator.java   |   50 +-
 .../org/apache/iceberg/parquet/ColumnIterator.java |   97 +-
 .../org/apache/iceberg/parquet/ColumnWriter.java   |   83 +-
 .../apache/iceberg/parquet/MessageTypeToType.java  |   35 +-
 .../org/apache/iceberg/parquet/PageIterator.java   |  150 +-
 .../java/org/apache/iceberg/parquet/Parquet.java   |  433 ++--
 .../org/apache/iceberg/parquet/ParquetAvro.java    |  158 +-
 .../apache/iceberg/parquet/ParquetAvroReader.java  |    4 +-
 .../iceberg/parquet/ParquetAvroValueReaders.java   |   55 +-
 .../apache/iceberg/parquet/ParquetAvroWriter.java  |   25 +-
 .../parquet/ParquetBloomRowGroupFilter.java        |   22 +-
 .../apache/iceberg/parquet/ParquetConversions.java |   15 +-
 .../parquet/ParquetDictionaryRowGroupFilter.java   |   43 +-
 .../org/apache/iceberg/parquet/ParquetFilters.java |   10 +-
 .../java/org/apache/iceberg/parquet/ParquetIO.java |   14 +-
 .../apache/iceberg/parquet/ParquetIterable.java    |    1 -
 .../parquet/ParquetMetricsRowGroupFilter.java      |   65 +-
 .../apache/iceberg/parquet/ParquetReadSupport.java |   40 +-
 .../org/apache/iceberg/parquet/ParquetReader.java  |   28 +-
 .../apache/iceberg/parquet/ParquetSchemaUtil.java  |   44 +-
 .../apache/iceberg/parquet/ParquetTypeVisitor.java |   29 +-
 .../org/apache/iceberg/parquet/ParquetUtil.java    |  133 +-
 .../apache/iceberg/parquet/ParquetValueReader.java |    1 -
 .../iceberg/parquet/ParquetValueReaders.java       |  115 +-
 .../apache/iceberg/parquet/ParquetValueWriter.java |    6 +-
 .../iceberg/parquet/ParquetValueWriters.java       |  134 +-
 .../iceberg/parquet/ParquetWriteAdapter.java       |    8 +-
 .../iceberg/parquet/ParquetWriteSupport.java       |   13 +-
 .../org/apache/iceberg/parquet/ParquetWriter.java  |   87 +-
 .../org/apache/iceberg/parquet/PruneColumns.java   |    8 +-
 .../java/org/apache/iceberg/parquet/ReadConf.java  |   43 +-
 .../java/org/apache/iceberg/parquet/RemoveIds.java |   11 +-
 .../org/apache/iceberg/parquet/TripleIterator.java |   37 +-
 .../org/apache/iceberg/parquet/TripleWriter.java   |    1 -
 .../apache/iceberg/parquet/TypeToMessageType.java  |   51 +-
 .../iceberg/parquet/TypeWithSchemaVisitor.java     |   59 +-
 .../iceberg/parquet/ValuesAsBytesReader.java       |   12 +-
 .../iceberg/parquet/VectorizedParquetReader.java   |   29 +-
 .../apache/iceberg/parquet/VectorizedReader.java   |   16 +-
 .../test/java/org/apache/iceberg/TestHelpers.java  |   41 +-
 .../iceberg/avro/TestParquetReadProjection.java    |   18 +-
 .../apache/iceberg/avro/TestReadProjection.java    |  362 +--
 .../iceberg/parquet/ParquetWritingTestUtils.java   |   51 +-
 .../iceberg/parquet/TestBloomRowGroupFilter.java   |  873 ++++---
 .../iceberg/parquet/TestCDHParquetStatistics.java  |   29 +-
 .../parquet/TestDictionaryRowGroupFilter.java      | 1072 ++++----
 .../org/apache/iceberg/parquet/TestParquet.java    |   99 +-
 .../iceberg/parquet/TestParquetDataWriter.java     |  128 +-
 .../iceberg/parquet/TestParquetDeleteWriters.java  |  129 +-
 .../iceberg/parquet/TestParquetSchemaUtil.java     |  505 ++--
 .../apache/iceberg/parquet/TestPruneColumns.java   |  318 ++-
 .../apache/iceberg/pig/IcebergPigInputFormat.java  |   59 +-
 .../org/apache/iceberg/pig/IcebergStorage.java     |   82 +-
 .../org/apache/iceberg/pig/PigParquetReader.java   |   61 +-
 .../java/org/apache/iceberg/pig/SchemaUtil.java    |   57 +-
 .../org/apache/iceberg/pig/SchemaUtilTest.java     |  247 +-
 .../apache/iceberg/spark/SparkBenchmarkUtil.java   |    7 +-
 .../SparkParquetReadersFlatDataBenchmark.java      |  136 +-
 .../SparkParquetReadersNestedDataBenchmark.java    |  138 +-
 .../SparkParquetWritersFlatDataBenchmark.java      |   59 +-
 .../SparkParquetWritersNestedDataBenchmark.java    |   62 +-
 .../org/apache/iceberg/spark/source/Action.java    |    1 -
 .../spark/source/IcebergSourceBenchmark.java       |   93 +-
 .../source/IcebergSourceFlatDataBenchmark.java     |   26 +-
 .../source/IcebergSourceNestedDataBenchmark.java   |   25 +-
 .../IcebergSourceNestedListDataBenchmark.java      |   26 +-
 .../iceberg/spark/source/WritersBenchmark.java     |  166 +-
 .../spark/source/avro/AvroWritersBenchmark.java    |    4 +-
 .../IcebergSourceFlatAvroDataReadBenchmark.java    |   96 +-
 .../IcebergSourceNestedAvroDataReadBenchmark.java  |   96 +-
 .../orc/IcebergSourceFlatORCDataBenchmark.java     |   37 +-
 .../orc/IcebergSourceFlatORCDataReadBenchmark.java |  152 +-
 ...ebergSourceNestedListORCDataWriteBenchmark.java |   52 +-
 .../IcebergSourceNestedORCDataReadBenchmark.java   |  132 +-
 ...cebergSourceFlatParquetDataFilterBenchmark.java |   71 +-
 .../IcebergSourceFlatParquetDataReadBenchmark.java |  102 +-
 ...IcebergSourceFlatParquetDataWriteBenchmark.java |   15 +-
 ...gSourceNestedListParquetDataWriteBenchmark.java |   28 +-
 ...bergSourceNestedParquetDataFilterBenchmark.java |   71 +-
 ...cebergSourceNestedParquetDataReadBenchmark.java |  100 +-
 ...ebergSourceNestedParquetDataWriteBenchmark.java |   20 +-
 .../source/parquet/ParquetWritersBenchmark.java    |    4 +-
 ...dDictionaryEncodedFlatParquetDataBenchmark.java |   36 +-
 .../VectorizedReadFlatParquetDataBenchmark.java    |  265 +-
 .../java/org/apache/iceberg/actions/Actions.java   |   32 +-
 .../iceberg/actions/RewriteDataFilesAction.java    |   11 +-
 .../org/apache/iceberg/actions/SparkActions.java   |    6 +-
 .../org/apache/iceberg/spark/IcebergSpark.java     |   15 +-
 .../org/apache/iceberg/spark/JobGroupInfo.java     |    6 +-
 .../org/apache/iceberg/spark/JobGroupUtils.java    |   10 +-
 .../iceberg/spark/PruneColumnsWithReordering.java  |   89 +-
 .../spark/PruneColumnsWithoutReordering.java       |   72 +-
 .../org/apache/iceberg/spark/SparkConfParser.java  |    4 +-
 .../org/apache/iceberg/spark/SparkDataFile.java    |   25 +-
 .../apache/iceberg/spark/SparkExceptionUtil.java   |    8 +-
 .../org/apache/iceberg/spark/SparkFilters.java     |  118 +-
 .../iceberg/spark/SparkFixupTimestampType.java     |   12 +-
 .../org/apache/iceberg/spark/SparkFixupTypes.java  |    9 +-
 .../org/apache/iceberg/spark/SparkReadConf.java    |   84 +-
 .../org/apache/iceberg/spark/SparkReadOptions.java |   11 +-
 .../apache/iceberg/spark/SparkSQLProperties.java   |    7 +-
 .../org/apache/iceberg/spark/SparkSchemaUtil.java  |  138 +-
 .../org/apache/iceberg/spark/SparkStructLike.java  |    1 -
 .../org/apache/iceberg/spark/SparkTableUtil.java   |  459 ++--
 .../org/apache/iceberg/spark/SparkTypeToType.java  |   23 +-
 .../org/apache/iceberg/spark/SparkTypeVisitor.java |   15 +-
 .../java/org/apache/iceberg/spark/SparkUtil.java   |  118 +-
 .../apache/iceberg/spark/SparkValueConverter.java  |    8 +-
 .../org/apache/iceberg/spark/SparkWriteConf.java   |   78 +-
 .../apache/iceberg/spark/SparkWriteOptions.java    |   11 +-
 .../org/apache/iceberg/spark/TypeToSparkType.java  |   11 +-
 .../actions/BaseDeleteOrphanFilesSparkAction.java  |  111 +-
 .../BaseDeleteReachableFilesSparkAction.java       |  105 +-
 .../actions/BaseExpireSnapshotsSparkAction.java    |  132 +-
 .../actions/BaseRewriteManifestsSparkAction.java   |  143 +-
 .../actions/BaseSnapshotUpdateSparkAction.java     |    5 +-
 .../iceberg/spark/actions/BaseSparkAction.java     |   24 +-
 .../iceberg/spark/actions/BaseSparkActions.java    |    1 -
 .../iceberg/spark/actions/ManifestFileBean.java    |    1 -
 .../apache/iceberg/spark/actions/SparkActions.java |    7 +-
 .../spark/data/AvroWithSparkSchemaVisitor.java     |   10 +-
 .../spark/data/ParquetWithSparkSchemaVisitor.java  |   83 +-
 .../apache/iceberg/spark/data/SparkAvroReader.java |   21 +-
 .../apache/iceberg/spark/data/SparkAvroWriter.java |   35 +-
 .../apache/iceberg/spark/data/SparkOrcReader.java  |   27 +-
 .../iceberg/spark/data/SparkOrcValueReaders.java   |   53 +-
 .../iceberg/spark/data/SparkOrcValueWriters.java   |   42 +-
 .../apache/iceberg/spark/data/SparkOrcWriter.java  |   46 +-
 .../iceberg/spark/data/SparkParquetReaders.java    |   89 +-
 .../iceberg/spark/data/SparkParquetWriters.java    |  104 +-
 .../iceberg/spark/data/SparkValueReaders.java      |   44 +-
 .../iceberg/spark/data/SparkValueWriters.java      |   42 +-
 .../vectorized/ArrowVectorAccessorFactory.java     |   21 +-
 .../data/vectorized/ArrowVectorAccessors.java      |    4 +-
 .../spark/data/vectorized/ColumnarBatchReader.java |   16 +-
 .../data/vectorized/ConstantColumnVector.java      |    4 +-
 .../data/vectorized/IcebergArrowColumnVector.java  |   18 +-
 .../data/vectorized/RowPositionColumnVector.java   |    4 +-
 .../data/vectorized/VectorizedSparkOrcReaders.java |  108 +-
 .../vectorized/VectorizedSparkParquetReaders.java  |   19 +-
 .../iceberg/spark/source/BaseDataReader.java       |   16 +-
 .../iceberg/spark/source/BatchDataReader.java      |   61 +-
 .../iceberg/spark/source/CustomCatalogs.java       |   31 +-
 .../spark/source/EqualityDeleteRowReader.java      |    4 +-
 .../apache/iceberg/spark/source/IcebergSource.java |   57 +-
 .../iceberg/spark/source/InternalRowWrapper.java   |   13 +-
 .../org/apache/iceberg/spark/source/Reader.java    |  163 +-
 .../apache/iceberg/spark/source/RowDataReader.java |   72 +-
 .../iceberg/spark/source/RowDataRewriter.java      |   81 +-
 .../iceberg/spark/source/SparkAppenderFactory.java |   67 +-
 .../spark/source/SparkFileWriterFactory.java       |   82 +-
 .../spark/source/SparkPartitionedFanoutWriter.java |   12 +-
 .../spark/source/SparkPartitionedWriter.java       |   14 +-
 .../org/apache/iceberg/spark/source/Stats.java     |    1 -
 .../iceberg/spark/source/StreamingOffset.java      |   29 +-
 .../iceberg/spark/source/StreamingWriter.java      |   15 +-
 .../iceberg/spark/source/StructInternalRow.java    |   63 +-
 .../org/apache/iceberg/spark/source/Writer.java    |  150 +-
 .../test/java/org/apache/iceberg/KryoHelpers.java  |    7 +-
 .../java/org/apache/iceberg/TaskCheckHelper.java   |   80 +-
 .../apache/iceberg/TestDataFileSerialization.java  |   72 +-
 .../apache/iceberg/TestFileIOSerialization.java    |   31 +-
 .../iceberg/TestManifestFileSerialization.java     |  151 +-
 .../apache/iceberg/TestScanTaskSerialization.java  |   50 +-
 .../org/apache/iceberg/TestTableSerialization.java |   47 +-
 .../actions/TestRewriteDataFilesAction.java        |  290 ++-
 .../apache/iceberg/examples/ConcurrencyTest.java   |   28 +-
 .../iceberg/examples/ReadAndWriteTablesTest.java   |   80 +-
 .../iceberg/examples/SchemaEvolutionTest.java      |   72 +-
 .../org/apache/iceberg/examples/SimpleRecord.java  |    4 +-
 .../examples/SnapshotFunctionalityTest.java        |   36 +-
 .../org/apache/iceberg/spark/SparkTestBase.java    |  107 +-
 .../apache/iceberg/spark/TestSparkSchemaUtil.java  |   23 +-
 .../iceberg/spark/TestSparkValueConverter.java     |   62 +-
 .../actions/TestDeleteReachableFilesAction.java    |  328 ++-
 .../spark/actions/TestExpireSnapshotsAction.java   |  961 ++++----
 .../spark/actions/TestRemoveOrphanFilesAction.java |  461 ++--
 .../spark/actions/TestRewriteManifestsAction.java  |  273 ++-
 .../apache/iceberg/spark/data/AvroDataTest.java    |  277 ++-
 .../apache/iceberg/spark/data/GenericsHelpers.java |  135 +-
 .../org/apache/iceberg/spark/data/RandomData.java  |  103 +-
 .../org/apache/iceberg/spark/data/TestHelpers.java |  294 ++-
 .../apache/iceberg/spark/data/TestOrcWrite.java    |   24 +-
 .../iceberg/spark/data/TestParquetAvroReader.java  |  156 +-
 .../iceberg/spark/data/TestParquetAvroWriter.java  |   98 +-
 .../iceberg/spark/data/TestSparkAvroEnums.java     |   35 +-
 .../iceberg/spark/data/TestSparkAvroReader.java    |   20 +-
 .../iceberg/spark/data/TestSparkDateTimes.java     |    5 +-
 .../data/TestSparkOrcReadMetadataColumns.java      |   91 +-
 .../iceberg/spark/data/TestSparkOrcReader.java     |   55 +-
 .../data/TestSparkParquetReadMetadataColumns.java  |   98 +-
 .../iceberg/spark/data/TestSparkParquetReader.java |   41 +-
 .../iceberg/spark/data/TestSparkParquetWriter.java |   99 +-
 .../spark/data/TestSparkRecordOrcReaderWriter.java |   67 +-
 ...estParquetDictionaryEncodedVectorizedReads.java |   57 +-
 ...naryFallbackToPlainEncodingVectorizedReads.java |   23 +-
 .../vectorized/TestParquetVectorizedReads.java     |  245 +-
 .../apache/iceberg/spark/source/LogMessage.java    |    1 -
 .../apache/iceberg/spark/source/ManualSource.java  |   26 +-
 .../apache/iceberg/spark/source/SimpleRecord.java  |    4 +-
 .../apache/iceberg/spark/source/TestAvroScan.java  |   32 +-
 .../apache/iceberg/spark/source/TestCatalog.java   |   42 +-
 .../iceberg/spark/source/TestCustomCatalog.java    |  149 +-
 .../iceberg/spark/source/TestDataFrameWrites.java  |  233 +-
 .../spark/source/TestDataSourceOptions.java        |  261 +-
 .../iceberg/spark/source/TestFilteredScan.java     |  346 +--
 .../spark/source/TestForwardCompatibility.java     |  101 +-
 .../iceberg/spark/source/TestIcebergSource.java    |    1 -
 .../source/TestIcebergSourceHadoopTables.java      |    1 -
 .../spark/source/TestIcebergSourceHiveTables.java  |    4 +-
 .../spark/source/TestIcebergSourceTablesBase.java  | 1452 ++++++-----
 .../iceberg/spark/source/TestIcebergSpark.java     |   83 +-
 .../spark/source/TestIdentityPartitionData.java    |  174 +-
 .../spark/source/TestInternalRowWrapper.java       |    7 +-
 .../spark/source/TestNameMappingProjection.java    |  107 +-
 .../iceberg/spark/source/TestParquetScan.java      |   49 +-
 .../iceberg/spark/source/TestPartitionPruning.java |  319 ++-
 .../iceberg/spark/source/TestPartitionValues.java  |  316 +--
 .../iceberg/spark/source/TestReadProjection.java   |  439 ++--
 .../apache/iceberg/spark/source/TestSelect.java    |   98 +-
 .../spark/source/TestSnapshotSelection.java        |  138 +-
 .../spark/source/TestSparkAppenderFactory.java     |    9 +-
 .../spark/source/TestSparkBaseDataReader.java      |   88 +-
 .../iceberg/spark/source/TestSparkDataFile.java    |  108 +-
 .../iceberg/spark/source/TestSparkDataWrite.java   |  319 ++-
 .../spark/source/TestSparkFileWriterFactory.java   |    9 +-
 .../spark/source/TestSparkMergingMetrics.java      |   39 +-
 .../spark/source/TestSparkPartitioningWriters.java |    9 +-
 .../source/TestSparkPositionDeltaWriters.java      |    9 +-
 .../spark/source/TestSparkReadProjection.java      |  224 +-
 .../spark/source/TestSparkReaderDeletes.java       |  159 +-
 .../spark/source/TestSparkRollingFileWriters.java  |    9 +-
 .../iceberg/spark/source/TestSparkSchema.java      |  123 +-
 .../iceberg/spark/source/TestSparkTableUtil.java   |  378 +--
 .../TestSparkTableUtilWithInMemoryCatalog.java     |  395 +--
 .../spark/source/TestSparkWriterMetrics.java       |    1 -
 .../iceberg/spark/source/TestStreamingOffset.java  |   28 +-
 .../spark/source/TestStructuredStreaming.java      |  148 +-
 .../apache/iceberg/spark/source/TestTables.java    |   11 +-
 .../spark/source/TestTimestampWithoutZone.java     |  169 +-
 .../spark/source/TestWriteMetricsConfig.java       |   84 +-
 .../iceberg/spark/source/ThreeColumnRecord.java    |   16 +-
 .../apache/iceberg/spark/extensions/Employee.java  |    4 +-
 .../spark/extensions/SparkExtensionsTestBase.java  |   35 +-
 .../SparkRowLevelOperationsTestBase.java           |  131 +-
 .../spark/extensions/TestAddFilesProcedure.java    |  605 +++--
 .../extensions/TestAlterTablePartitionFields.java  |  250 +-
 .../spark/extensions/TestAlterTableSchema.java     |   72 +-
 .../spark/extensions/TestAncestorsOfProcedure.java |   37 +-
 .../spark/extensions/TestCallStatementParser.java  |   92 +-
 .../TestCherrypickSnapshotProcedure.java           |   85 +-
 .../spark/extensions/TestCopyOnWriteDelete.java    |   10 +-
 .../spark/extensions/TestCopyOnWriteMerge.java     |   10 +-
 .../spark/extensions/TestCopyOnWriteUpdate.java    |   10 +-
 .../iceberg/spark/extensions/TestDelete.java       |  376 +--
 .../extensions/TestExpireSnapshotsProcedure.java   |  202 +-
 .../spark/extensions/TestIcebergExpressions.java   |   34 +-
 .../apache/iceberg/spark/extensions/TestMerge.java | 2176 ++++++++++-------
 .../extensions/TestMigrateTableProcedure.java      |   64 +-
 .../extensions/TestPublishChangesProcedure.java    |   77 +-
 .../extensions/TestRemoveOrphanFilesProcedure.java |  203 +-
 .../extensions/TestRewriteDataFilesProcedure.java  |  285 ++-
 .../extensions/TestRewriteManifestsProcedure.java  |  130 +-
 .../TestRollbackToSnapshotProcedure.java           |  141 +-
 .../TestRollbackToTimestampProcedure.java          |  140 +-
 .../TestSetCurrentSnapshotProcedure.java           |  131 +-
 .../TestSetWriteDistributionAndOrdering.java       |  130 +-
 .../extensions/TestSnapshotTableProcedure.java     |  132 +-
 .../iceberg/spark/extensions/TestUpdate.java       |  666 +++--
 .../apache/iceberg/spark/SparkBenchmarkUtil.java   |    7 +-
 .../SparkParquetReadersFlatDataBenchmark.java      |  136 +-
 .../SparkParquetReadersNestedDataBenchmark.java    |  138 +-
 .../SparkParquetWritersFlatDataBenchmark.java      |   59 +-
 .../SparkParquetWritersNestedDataBenchmark.java    |   62 +-
 .../org/apache/iceberg/spark/source/Action.java    |    1 -
 .../spark/source/IcebergSourceBenchmark.java       |   93 +-
 .../source/IcebergSourceFlatDataBenchmark.java     |   26 +-
 .../source/IcebergSourceNestedDataBenchmark.java   |   25 +-
 .../IcebergSourceNestedListDataBenchmark.java      |   26 +-
 .../iceberg/spark/source/WritersBenchmark.java     |  166 +-
 .../spark/source/avro/AvroWritersBenchmark.java    |    4 +-
 .../IcebergSourceFlatAvroDataReadBenchmark.java    |   96 +-
 .../IcebergSourceNestedAvroDataReadBenchmark.java  |   96 +-
 .../orc/IcebergSourceFlatORCDataBenchmark.java     |   37 +-
 .../orc/IcebergSourceFlatORCDataReadBenchmark.java |  152 +-
 ...ebergSourceNestedListORCDataWriteBenchmark.java |   52 +-
 .../IcebergSourceNestedORCDataReadBenchmark.java   |  132 +-
 ...cebergSourceFlatParquetDataFilterBenchmark.java |   71 +-
 .../IcebergSourceFlatParquetDataReadBenchmark.java |  102 +-
 ...IcebergSourceFlatParquetDataWriteBenchmark.java |   15 +-
 ...gSourceNestedListParquetDataWriteBenchmark.java |   28 +-
 ...bergSourceNestedParquetDataFilterBenchmark.java |   71 +-
 ...cebergSourceNestedParquetDataReadBenchmark.java |  100 +-
 ...ebergSourceNestedParquetDataWriteBenchmark.java |   20 +-
 .../source/parquet/ParquetWritersBenchmark.java    |    4 +-
 ...dDictionaryEncodedFlatParquetDataBenchmark.java |   38 +-
 .../VectorizedReadFlatParquetDataBenchmark.java    |  265 +-
 .../java/org/apache/iceberg/spark/BaseCatalog.java |    4 +-
 .../org/apache/iceberg/spark/CommitMetadata.java   |   17 +-
 .../iceberg/spark/FileRewriteCoordinator.java      |   20 +-
 .../iceberg/spark/FileScanTaskSetManager.java      |    7 +-
 .../org/apache/iceberg/spark/IcebergSpark.java     |   15 +-
 .../org/apache/iceberg/spark/JobGroupInfo.java     |    6 +-
 .../org/apache/iceberg/spark/JobGroupUtils.java    |   10 +-
 .../java/org/apache/iceberg/spark/OrderField.java  |   53 +-
 .../org/apache/iceberg/spark/PathIdentifier.java   |    8 +-
 .../iceberg/spark/PruneColumnsWithReordering.java  |   89 +-
 .../spark/PruneColumnsWithoutReordering.java       |   72 +-
 .../apache/iceberg/spark/RollbackStagedTable.java  |   51 +-
 .../org/apache/iceberg/spark/SortOrderToSpark.java |    8 +-
 .../java/org/apache/iceberg/spark/Spark3Util.java  |  423 ++--
 .../apache/iceberg/spark/Spark3VersionUtil.java    |    4 +-
 .../org/apache/iceberg/spark/SparkCatalog.java     |  199 +-
 .../org/apache/iceberg/spark/SparkConfParser.java  |    4 +-
 .../org/apache/iceberg/spark/SparkDataFile.java    |   25 +-
 .../apache/iceberg/spark/SparkExceptionUtil.java   |    8 +-
 .../org/apache/iceberg/spark/SparkFilters.java     |  125 +-
 .../iceberg/spark/SparkFixupTimestampType.java     |   12 +-
 .../org/apache/iceberg/spark/SparkFixupTypes.java  |    9 +-
 .../org/apache/iceberg/spark/SparkReadConf.java    |   93 +-
 .../org/apache/iceberg/spark/SparkReadOptions.java |   14 +-
 .../apache/iceberg/spark/SparkSQLProperties.java   |    7 +-
 .../org/apache/iceberg/spark/SparkSchemaUtil.java  |  138 +-
 .../apache/iceberg/spark/SparkSessionCatalog.java  |   63 +-
 .../org/apache/iceberg/spark/SparkStructLike.java  |    1 -
 .../org/apache/iceberg/spark/SparkTableUtil.java   |  463 ++--
 .../org/apache/iceberg/spark/SparkTypeToType.java  |   23 +-
 .../org/apache/iceberg/spark/SparkTypeVisitor.java |   15 +-
 .../java/org/apache/iceberg/spark/SparkUtil.java   |  118 +-
 .../apache/iceberg/spark/SparkValueConverter.java  |    8 +-
 .../org/apache/iceberg/spark/SparkWriteConf.java   |   78 +-
 .../apache/iceberg/spark/SparkWriteOptions.java    |   11 +-
 .../org/apache/iceberg/spark/TypeToSparkType.java  |   11 +-
 .../actions/BaseDeleteOrphanFilesSparkAction.java  |  111 +-
 .../BaseDeleteReachableFilesSparkAction.java       |  105 +-
 .../actions/BaseExpireSnapshotsSparkAction.java    |  132 +-
 .../spark/actions/BaseMigrateTableSparkAction.java |   38 +-
 .../actions/BaseRewriteDataFilesSpark3Action.java  |    1 -
 .../actions/BaseRewriteDataFilesSparkAction.java   |  341 +--
 .../actions/BaseRewriteManifestsSparkAction.java   |  143 +-
 .../actions/BaseSnapshotTableSparkAction.java      |   61 +-
 .../actions/BaseSnapshotUpdateSparkAction.java     |    5 +-
 .../iceberg/spark/actions/BaseSparkAction.java     |   24 +-
 .../iceberg/spark/actions/BaseSparkActions.java    |    1 -
 .../actions/BaseTableCreationSparkAction.java      |   50 +-
 .../iceberg/spark/actions/ManifestFileBean.java    |    1 -
 .../spark/actions/Spark3BinPackStrategy.java       |   17 +-
 .../iceberg/spark/actions/Spark3SortStrategy.java  |   51 +-
 .../apache/iceberg/spark/actions/SparkActions.java |   19 +-
 .../spark/data/AvroWithSparkSchemaVisitor.java     |   10 +-
 .../spark/data/ParquetWithSparkSchemaVisitor.java  |   83 +-
 .../apache/iceberg/spark/data/SparkAvroReader.java |   21 +-
 .../apache/iceberg/spark/data/SparkAvroWriter.java |   35 +-
 .../apache/iceberg/spark/data/SparkOrcReader.java  |   27 +-
 .../iceberg/spark/data/SparkOrcValueReaders.java   |   53 +-
 .../iceberg/spark/data/SparkOrcValueWriters.java   |   42 +-
 .../apache/iceberg/spark/data/SparkOrcWriter.java  |   46 +-
 .../iceberg/spark/data/SparkParquetReaders.java    |   89 +-
 .../iceberg/spark/data/SparkParquetWriters.java    |  104 +-
 .../iceberg/spark/data/SparkValueReaders.java      |   44 +-
 .../iceberg/spark/data/SparkValueWriters.java      |   42 +-
 .../vectorized/ArrowVectorAccessorFactory.java     |   21 +-
 .../data/vectorized/ArrowVectorAccessors.java      |    4 +-
 .../spark/data/vectorized/ColumnarBatchReader.java |   16 +-
 .../data/vectorized/ConstantColumnVector.java      |    4 +-
 .../data/vectorized/IcebergArrowColumnVector.java  |   18 +-
 .../data/vectorized/RowPositionColumnVector.java   |    4 +-
 .../data/vectorized/VectorizedSparkOrcReaders.java |  108 +-
 .../vectorized/VectorizedSparkParquetReaders.java  |   19 +-
 .../spark/procedures/AddFilesProcedure.java        |  204 +-
 .../spark/procedures/AncestorsOfProcedure.java     |   26 +-
 .../iceberg/spark/procedures/BaseProcedure.java    |   36 +-
 .../procedures/CherrypickSnapshotProcedure.java    |   45 +-
 .../spark/procedures/ExpireSnapshotsProcedure.java |   93 +-
 .../spark/procedures/MigrateTableProcedure.java    |   37 +-
 .../spark/procedures/PublishChangesProcedure.java  |   61 +-
 .../procedures/RemoveOrphanFilesProcedure.java     |   94 +-
 .../procedures/RewriteDataFilesProcedure.java      |  102 +-
 .../procedures/RewriteManifestsProcedure.java      |   46 +-
 .../procedures/RollbackToSnapshotProcedure.java    |   41 +-
 .../procedures/RollbackToTimestampProcedure.java   |   44 +-
 .../procedures/SetCurrentSnapshotProcedure.java    |   43 +-
 .../spark/procedures/SnapshotTableProcedure.java   |   46 +-
 .../iceberg/spark/procedures/SparkProcedures.java  |    5 +-
 .../iceberg/spark/source/BaseDataReader.java       |   16 +-
 .../iceberg/spark/source/BatchDataReader.java      |   61 +-
 .../spark/source/EqualityDeleteRowReader.java      |    4 +-
 .../apache/iceberg/spark/source/IcebergSource.java |   67 +-
 .../iceberg/spark/source/InternalRowWrapper.java   |   13 +-
 .../apache/iceberg/spark/source/RowDataReader.java |   72 +-
 .../iceberg/spark/source/RowDataRewriter.java      |   81 +-
 .../spark/source/SerializableTableWithSize.java    |    7 +-
 .../iceberg/spark/source/SparkAppenderFactory.java |   67 +-
 .../iceberg/spark/source/SparkBatchQueryScan.java  |   53 +-
 .../iceberg/spark/source/SparkBatchScan.java       |   91 +-
 .../spark/source/SparkFileWriterFactory.java       |   82 +-
 .../iceberg/spark/source/SparkFilesScan.java       |   37 +-
 .../spark/source/SparkFilesScanBuilder.java        |    1 -
 .../iceberg/spark/source/SparkMergeBuilder.java    |   41 +-
 .../iceberg/spark/source/SparkMergeScan.java       |   67 +-
 .../spark/source/SparkMicroBatchStream.java        |   90 +-
 .../spark/source/SparkPartitionedFanoutWriter.java |   12 +-
 .../spark/source/SparkPartitionedWriter.java       |   14 +-
 .../iceberg/spark/source/SparkRewriteBuilder.java  |   10 +-
 .../iceberg/spark/source/SparkScanBuilder.java     |   52 +-
 .../apache/iceberg/spark/source/SparkTable.java    |   90 +-
 .../apache/iceberg/spark/source/SparkWrite.java    |  174 +-
 .../iceberg/spark/source/SparkWriteBuilder.java    |   41 +-
 .../iceberg/spark/source/StagedSparkTable.java     |    1 -
 .../org/apache/iceberg/spark/source/Stats.java     |    1 -
 .../iceberg/spark/source/StreamingOffset.java      |   29 +-
 .../iceberg/spark/source/StructInternalRow.java    |   63 +-
 .../analysis/NoSuchProcedureException.java         |    8 +-
 .../iceberg/catalog/ExtendedSupportsDelete.java    |   18 +-
 .../sql/connector/iceberg/catalog/Procedure.java   |   30 +-
 .../iceberg/catalog/ProcedureCatalog.java          |    7 +-
 .../iceberg/catalog/ProcedureParameter.java        |   17 +-
 .../iceberg/catalog/ProcedureParameterImpl.java    |   14 +-
 .../connector/iceberg/catalog/SupportsMerge.java   |   10 +-
 .../distributions/ClusteredDistribution.java       |    5 +-
 .../iceberg/distributions/Distribution.java        |    5 +-
 .../iceberg/distributions/Distributions.java       |   12 +-
 .../iceberg/distributions/OrderedDistribution.java |    9 +-
 .../distributions/UnspecifiedDistribution.java     |    1 -
 .../impl/ClusterDistributionImpl.java              |    2 -
 .../impl/OrderedDistributionImpl.java              |    2 -
 .../impl/UnspecifiedDistributionImpl.java          |    6 +-
 .../iceberg/expressions/NullOrdering.java          |    4 +-
 .../iceberg/expressions/SortDirection.java         |    4 +-
 .../connector/iceberg/expressions/SortOrder.java   |   13 +-
 .../connector/iceberg/read/SupportsFileFilter.java |    1 -
 .../sql/connector/iceberg/write/MergeBuilder.java  |    5 +-
 .../test/java/org/apache/iceberg/KryoHelpers.java  |    7 +-
 .../java/org/apache/iceberg/TaskCheckHelper.java   |   80 +-
 .../apache/iceberg/TestDataFileSerialization.java  |   72 +-
 .../apache/iceberg/TestFileIOSerialization.java    |   31 +-
 .../iceberg/TestManifestFileSerialization.java     |  151 +-
 .../apache/iceberg/TestScanTaskSerialization.java  |   50 +-
 .../org/apache/iceberg/TestTableSerialization.java |   47 +-
 .../apache/iceberg/spark/SparkCatalogConfig.java   |   31 +-
 .../apache/iceberg/spark/SparkCatalogTestBase.java |   37 +-
 .../org/apache/iceberg/spark/SparkTestBase.java    |  134 +-
 .../iceberg/spark/SparkTestBaseWithCatalog.java    |   20 +-
 .../iceberg/spark/TestFileRewriteCoordinator.java  |  117 +-
 .../org/apache/iceberg/spark/TestSpark3Util.java   |  100 +-
 .../iceberg/spark/TestSparkCatalogOperations.java  |    1 -
 .../org/apache/iceberg/spark/TestSparkFilters.java |   25 +-
 .../apache/iceberg/spark/TestSparkSchemaUtil.java  |   23 +-
 .../apache/iceberg/spark/TestSparkTableUtil.java   |    1 -
 .../iceberg/spark/TestSparkValueConverter.java     |   62 +-
 .../iceberg/spark/actions/TestCreateActions.java   |  386 ++-
 .../actions/TestDeleteReachableFilesAction.java    |  328 ++-
 .../spark/actions/TestExpireSnapshotsAction.java   | 1000 ++++----
 .../spark/actions/TestRemoveOrphanFilesAction.java |  461 ++--
 .../actions/TestRemoveOrphanFilesAction3.java      |   66 +-
 .../spark/actions/TestRewriteDataFilesAction.java  |  626 +++--
 .../spark/actions/TestRewriteManifestsAction.java  |  274 +--
 .../apache/iceberg/spark/data/AvroDataTest.java    |  277 ++-
 .../apache/iceberg/spark/data/GenericsHelpers.java |  135 +-
 .../org/apache/iceberg/spark/data/RandomData.java  |  103 +-
 .../org/apache/iceberg/spark/data/TestHelpers.java |  294 ++-
 .../apache/iceberg/spark/data/TestOrcWrite.java    |   24 +-
 .../iceberg/spark/data/TestParquetAvroReader.java  |  156 +-
 .../iceberg/spark/data/TestParquetAvroWriter.java  |   98 +-
 .../iceberg/spark/data/TestSparkAvroEnums.java     |   35 +-
 .../iceberg/spark/data/TestSparkAvroReader.java    |   20 +-
 .../iceberg/spark/data/TestSparkDateTimes.java     |    5 +-
 .../data/TestSparkOrcReadMetadataColumns.java      |   91 +-
 .../iceberg/spark/data/TestSparkOrcReader.java     |   55 +-
 .../data/TestSparkParquetReadMetadataColumns.java  |   98 +-
 .../iceberg/spark/data/TestSparkParquetReader.java |   41 +-
 .../iceberg/spark/data/TestSparkParquetWriter.java |   99 +-
 .../spark/data/TestSparkRecordOrcReaderWriter.java |   67 +-
 ...estParquetDictionaryEncodedVectorizedReads.java |   57 +-
 ...naryFallbackToPlainEncodingVectorizedReads.java |   23 +-
 .../vectorized/TestParquetVectorizedReads.java     |  245 +-
 .../apache/iceberg/spark/source/LogMessage.java    |    1 -
 .../apache/iceberg/spark/source/ManualSource.java  |    7 +-
 .../apache/iceberg/spark/source/SimpleRecord.java  |    4 +-
 .../iceberg/spark/source/SparkTestTable.java       |    1 -
 .../apache/iceberg/spark/source/TestAvroScan.java  |   32 +-
 .../iceberg/spark/source/TestDataFrameWrites.java  |  233 +-
 .../spark/source/TestDataSourceOptions.java        |  299 +--
 .../iceberg/spark/source/TestFilteredScan.java     |  388 +--
 .../spark/source/TestForwardCompatibility.java     |  101 +-
 .../iceberg/spark/source/TestIcebergSource.java    |    2 -
 .../source/TestIcebergSourceHadoopTables.java      |    1 -
 .../spark/source/TestIcebergSourceHiveTables.java  |    4 +-
 .../spark/source/TestIcebergSourceTablesBase.java  | 1488 +++++++-----
 .../iceberg/spark/source/TestIcebergSpark.java     |   83 +-
 .../spark/source/TestIdentityPartitionData.java    |  174 +-
 .../spark/source/TestInternalRowWrapper.java       |    7 +-
 .../TestMetadataTablesWithPartitionEvolution.java  |  175 +-
 .../iceberg/spark/source/TestParquetScan.java      |   49 +-
 .../iceberg/spark/source/TestPartitionPruning.java |  319 ++-
 .../iceberg/spark/source/TestPartitionValues.java  |  316 +--
 .../iceberg/spark/source/TestPathIdentifier.java   |   25 +-
 .../iceberg/spark/source/TestReadProjection.java   |  439 ++--
 .../spark/source/TestSnapshotSelection.java        |  138 +-
 .../spark/source/TestSparkAppenderFactory.java     |    9 +-
 .../spark/source/TestSparkBaseDataReader.java      |   88 +-
 .../iceberg/spark/source/TestSparkCatalog.java     |    4 +-
 .../source/TestSparkCatalogCacheExpiration.java    |  102 +-
 .../source/TestSparkCatalogHadoopOverrides.java    |   71 +-
 .../iceberg/spark/source/TestSparkDataFile.java    |  108 +-
 .../iceberg/spark/source/TestSparkDataWrite.java   |  319 ++-
 .../spark/source/TestSparkFileWriterFactory.java   |    9 +-
 .../iceberg/spark/source/TestSparkFilesScan.java   |   47 +-
 .../spark/source/TestSparkMergingMetrics.java      |   39 +-
 .../spark/source/TestSparkMetadataColumns.java     |  117 +-
 .../spark/source/TestSparkPartitioningWriters.java |    9 +-
 .../source/TestSparkPositionDeltaWriters.java      |    9 +-
 .../spark/source/TestSparkReadProjection.java      |  224 +-
 .../spark/source/TestSparkReaderDeletes.java       |  159 +-
 .../spark/source/TestSparkRollingFileWriters.java  |    9 +-
 .../iceberg/spark/source/TestSparkTable.java       |    1 -
 .../spark/source/TestSparkWriterMetrics.java       |    1 -
 .../iceberg/spark/source/TestStreamingOffset.java  |   20 +-
 .../spark/source/TestStructuredStreaming.java      |  148 +-
 .../spark/source/TestStructuredStreamingRead3.java |  277 +--
 .../apache/iceberg/spark/source/TestTables.java    |   11 +-
 .../spark/source/TestTimestampWithoutZone.java     |  169 +-
 .../spark/source/TestWriteMetricsConfig.java       |   84 +-
 .../iceberg/spark/source/ThreeColumnRecord.java    |   16 +-
 .../apache/iceberg/spark/sql/TestAlterTable.java   |  267 +-
 .../apache/iceberg/spark/sql/TestCreateTable.java  |  218 +-
 .../iceberg/spark/sql/TestCreateTableAsSelect.java |  391 +--
 .../apache/iceberg/spark/sql/TestDeleteFrom.java   |   74 +-
 .../apache/iceberg/spark/sql/TestNamespaceSQL.java |   72 +-
 .../iceberg/spark/sql/TestPartitionedWrites.java   |  101 +-
 .../apache/iceberg/spark/sql/TestRefreshTable.java |   11 +-
 .../org/apache/iceberg/spark/sql/TestSelect.java   |   85 +-
 .../spark/sql/TestTimestampWithoutZone.java        |  168 +-
 .../iceberg/spark/sql/TestUnpartitionedWrites.java |   99 +-
 .../apache/iceberg/spark/extensions/Employee.java  |    4 +-
 .../spark/extensions/SparkExtensionsTestBase.java  |   35 +-
 .../SparkRowLevelOperationsTestBase.java           |  131 +-
 .../spark/extensions/TestAddFilesProcedure.java    |  605 +++--
 .../extensions/TestAlterTablePartitionFields.java  |  250 +-
 .../spark/extensions/TestAlterTableSchema.java     |   72 +-
 .../spark/extensions/TestAncestorsOfProcedure.java |   37 +-
 .../spark/extensions/TestCallStatementParser.java  |   92 +-
 .../TestCherrypickSnapshotProcedure.java           |   85 +-
 .../spark/extensions/TestCopyOnWriteDelete.java    |   10 +-
 .../spark/extensions/TestCopyOnWriteMerge.java     |   10 +-
 .../spark/extensions/TestCopyOnWriteUpdate.java    |   10 +-
 .../iceberg/spark/extensions/TestDelete.java       |  376 +--
 .../extensions/TestExpireSnapshotsProcedure.java   |  205 +-
 .../spark/extensions/TestIcebergExpressions.java   |   34 +-
 .../apache/iceberg/spark/extensions/TestMerge.java | 2176 ++++++++++-------
 .../extensions/TestMigrateTableProcedure.java      |   64 +-
 .../extensions/TestPublishChangesProcedure.java    |   77 +-
 .../extensions/TestRemoveOrphanFilesProcedure.java |  203 +-
 .../extensions/TestRewriteDataFilesProcedure.java  |  285 ++-
 .../extensions/TestRewriteManifestsProcedure.java  |  130 +-
 .../TestRollbackToSnapshotProcedure.java           |  141 +-
 .../TestRollbackToTimestampProcedure.java          |  140 +-
 .../TestSetCurrentSnapshotProcedure.java           |  131 +-
 .../TestSetWriteDistributionAndOrdering.java       |  130 +-
 .../extensions/TestSnapshotTableProcedure.java     |  132 +-
 .../iceberg/spark/extensions/TestUpdate.java       |  666 +++--
 .../apache/iceberg/spark/SparkBenchmarkUtil.java   |    7 +-
 .../SparkParquetReadersFlatDataBenchmark.java      |  136 +-
 .../SparkParquetReadersNestedDataBenchmark.java    |  138 +-
 .../SparkParquetWritersFlatDataBenchmark.java      |   59 +-
 .../SparkParquetWritersNestedDataBenchmark.java    |   62 +-
 .../org/apache/iceberg/spark/source/Action.java    |    1 -
 .../spark/source/IcebergSourceBenchmark.java       |   93 +-
 .../source/IcebergSourceFlatDataBenchmark.java     |   26 +-
 .../source/IcebergSourceNestedDataBenchmark.java   |   25 +-
 .../IcebergSourceNestedListDataBenchmark.java      |   26 +-
 .../iceberg/spark/source/WritersBenchmark.java     |  166 +-
 .../spark/source/avro/AvroWritersBenchmark.java    |    4 +-
 .../IcebergSourceFlatAvroDataReadBenchmark.java    |   96 +-
 .../IcebergSourceNestedAvroDataReadBenchmark.java  |   96 +-
 .../orc/IcebergSourceFlatORCDataBenchmark.java     |   37 +-
 .../orc/IcebergSourceFlatORCDataReadBenchmark.java |  152 +-
 ...ebergSourceNestedListORCDataWriteBenchmark.java |   52 +-
 .../IcebergSourceNestedORCDataReadBenchmark.java   |  132 +-
 ...cebergSourceFlatParquetDataFilterBenchmark.java |   71 +-
 .../IcebergSourceFlatParquetDataReadBenchmark.java |  102 +-
 ...IcebergSourceFlatParquetDataWriteBenchmark.java |   15 +-
 ...gSourceNestedListParquetDataWriteBenchmark.java |   28 +-
 ...bergSourceNestedParquetDataFilterBenchmark.java |   71 +-
 ...cebergSourceNestedParquetDataReadBenchmark.java |  100 +-
 ...ebergSourceNestedParquetDataWriteBenchmark.java |   20 +-
 .../source/parquet/ParquetWritersBenchmark.java    |    4 +-
 ...dDictionaryEncodedFlatParquetDataBenchmark.java |   38 +-
 .../VectorizedReadFlatParquetDataBenchmark.java    |  265 +-
 .../java/org/apache/iceberg/spark/BaseCatalog.java |    4 +-
 .../org/apache/iceberg/spark/CommitMetadata.java   |   17 +-
 .../iceberg/spark/FileRewriteCoordinator.java      |   20 +-
 .../iceberg/spark/FileScanTaskSetManager.java      |    7 +-
 .../org/apache/iceberg/spark/IcebergSpark.java     |   15 +-
 .../org/apache/iceberg/spark/JobGroupInfo.java     |    6 +-
 .../org/apache/iceberg/spark/JobGroupUtils.java    |   10 +-
 .../java/org/apache/iceberg/spark/OrderField.java  |   53 +-
 .../org/apache/iceberg/spark/PathIdentifier.java   |    8 +-
 .../iceberg/spark/PruneColumnsWithReordering.java  |   89 +-
 .../spark/PruneColumnsWithoutReordering.java       |   72 +-
 .../apache/iceberg/spark/RollbackStagedTable.java  |   51 +-
 .../org/apache/iceberg/spark/SortOrderToSpark.java |    8 +-
 .../java/org/apache/iceberg/spark/Spark3Util.java  |  423 ++--
 .../org/apache/iceberg/spark/SparkCatalog.java     |  210 +-
 .../org/apache/iceberg/spark/SparkConfParser.java  |    4 +-
 .../org/apache/iceberg/spark/SparkDataFile.java    |   25 +-
 .../apache/iceberg/spark/SparkExceptionUtil.java   |    8 +-
 .../org/apache/iceberg/spark/SparkFilters.java     |  125 +-
 .../iceberg/spark/SparkFixupTimestampType.java     |   12 +-
 .../org/apache/iceberg/spark/SparkFixupTypes.java  |    9 +-
 .../org/apache/iceberg/spark/SparkReadConf.java    |   93 +-
 .../org/apache/iceberg/spark/SparkReadOptions.java |   14 +-
 .../apache/iceberg/spark/SparkSQLProperties.java   |    7 +-
 .../org/apache/iceberg/spark/SparkSchemaUtil.java  |  138 +-
 .../apache/iceberg/spark/SparkSessionCatalog.java  |   66 +-
 .../org/apache/iceberg/spark/SparkStructLike.java  |    1 -
 .../org/apache/iceberg/spark/SparkTableUtil.java   |  463 ++--
 .../org/apache/iceberg/spark/SparkTypeToType.java  |   23 +-
 .../org/apache/iceberg/spark/SparkTypeVisitor.java |   15 +-
 .../java/org/apache/iceberg/spark/SparkUtil.java   |  118 +-
 .../apache/iceberg/spark/SparkValueConverter.java  |    8 +-
 .../org/apache/iceberg/spark/SparkWriteConf.java   |   78 +-
 .../apache/iceberg/spark/SparkWriteOptions.java    |   11 +-
 .../org/apache/iceberg/spark/TypeToSparkType.java  |   11 +-
 .../actions/BaseDeleteOrphanFilesSparkAction.java  |  111 +-
 .../BaseDeleteReachableFilesSparkAction.java       |  105 +-
 .../actions/BaseExpireSnapshotsSparkAction.java    |  132 +-
 .../spark/actions/BaseMigrateTableSparkAction.java |   38 +-
 .../actions/BaseRewriteDataFilesSpark3Action.java  |    1 -
 .../actions/BaseRewriteDataFilesSparkAction.java   |  341 +--
 .../actions/BaseRewriteManifestsSparkAction.java   |  143 +-
 .../actions/BaseSnapshotTableSparkAction.java      |   61 +-
 .../actions/BaseSnapshotUpdateSparkAction.java     |    5 +-
 .../iceberg/spark/actions/BaseSparkAction.java     |   24 +-
 .../iceberg/spark/actions/BaseSparkActions.java    |    1 -
 .../actions/BaseTableCreationSparkAction.java      |   50 +-
 .../iceberg/spark/actions/ManifestFileBean.java    |    1 -
 .../spark/actions/Spark3BinPackStrategy.java       |   17 +-
 .../iceberg/spark/actions/Spark3SortStrategy.java  |   51 +-
 .../apache/iceberg/spark/actions/SparkActions.java |   19 +-
 .../spark/data/AvroWithSparkSchemaVisitor.java     |   10 +-
 .../spark/data/ParquetWithSparkSchemaVisitor.java  |   83 +-
 .../apache/iceberg/spark/data/SparkAvroReader.java |   21 +-
 .../apache/iceberg/spark/data/SparkAvroWriter.java |   35 +-
 .../apache/iceberg/spark/data/SparkOrcReader.java  |   27 +-
 .../iceberg/spark/data/SparkOrcValueReaders.java   |   53 +-
 .../iceberg/spark/data/SparkOrcValueWriters.java   |   42 +-
 .../apache/iceberg/spark/data/SparkOrcWriter.java  |   46 +-
 .../iceberg/spark/data/SparkParquetReaders.java    |   89 +-
 .../iceberg/spark/data/SparkParquetWriters.java    |  104 +-
 .../iceberg/spark/data/SparkValueReaders.java      |   44 +-
 .../iceberg/spark/data/SparkValueWriters.java      |   42 +-
 .../vectorized/ArrowVectorAccessorFactory.java     |   21 +-
 .../data/vectorized/ArrowVectorAccessors.java      |    4 +-
 .../spark/data/vectorized/ColumnarBatchReader.java |   16 +-
 .../data/vectorized/ConstantColumnVector.java      |    4 +-
 .../data/vectorized/IcebergArrowColumnVector.java  |   18 +-
 .../data/vectorized/RowPositionColumnVector.java   |    4 +-
 .../data/vectorized/VectorizedSparkOrcReaders.java |  108 +-
 .../vectorized/VectorizedSparkParquetReaders.java  |   19 +-
 .../spark/procedures/AddFilesProcedure.java        |  204 +-
 .../spark/procedures/AncestorsOfProcedure.java     |   26 +-
 .../iceberg/spark/procedures/BaseProcedure.java    |   36 +-
 .../procedures/CherrypickSnapshotProcedure.java    |   45 +-
 .../spark/procedures/ExpireSnapshotsProcedure.java |   93 +-
 .../spark/procedures/MigrateTableProcedure.java    |   37 +-
 .../spark/procedures/PublishChangesProcedure.java  |   61 +-
 .../procedures/RemoveOrphanFilesProcedure.java     |   94 +-
 .../procedures/RewriteDataFilesProcedure.java      |  102 +-
 .../procedures/RewriteManifestsProcedure.java      |   46 +-
 .../procedures/RollbackToSnapshotProcedure.java    |   41 +-
 .../procedures/RollbackToTimestampProcedure.java   |   44 +-
 .../procedures/SetCurrentSnapshotProcedure.java    |   43 +-
 .../spark/procedures/SnapshotTableProcedure.java   |   46 +-
 .../iceberg/spark/procedures/SparkProcedures.java  |    5 +-
 .../iceberg/spark/source/BaseDataReader.java       |   16 +-
 .../iceberg/spark/source/BatchDataReader.java      |   61 +-
 .../spark/source/EqualityDeleteRowReader.java      |    4 +-
 .../apache/iceberg/spark/source/IcebergSource.java |   67 +-
 .../iceberg/spark/source/InternalRowWrapper.java   |   13 +-
 .../apache/iceberg/spark/source/RowDataReader.java |   72 +-
 .../iceberg/spark/source/RowDataRewriter.java      |   81 +-
 .../spark/source/SerializableTableWithSize.java    |    7 +-
 .../iceberg/spark/source/SparkAppenderFactory.java |   67 +-
 .../iceberg/spark/source/SparkBatchQueryScan.java  |   53 +-
 .../iceberg/spark/source/SparkBatchScan.java       |   91 +-
 .../spark/source/SparkFileWriterFactory.java       |   82 +-
 .../iceberg/spark/source/SparkFilesScan.java       |   37 +-
 .../spark/source/SparkFilesScanBuilder.java        |    1 -
 .../iceberg/spark/source/SparkMergeBuilder.java    |   41 +-
 .../iceberg/spark/source/SparkMergeScan.java       |   67 +-
 .../spark/source/SparkMicroBatchStream.java        |   90 +-
 .../spark/source/SparkPartitionedFanoutWriter.java |   12 +-
 .../spark/source/SparkPartitionedWriter.java       |   14 +-
 .../iceberg/spark/source/SparkRewriteBuilder.java  |   10 +-
 .../iceberg/spark/source/SparkScanBuilder.java     |   52 +-
 .../apache/iceberg/spark/source/SparkTable.java    |   87 +-
 .../apache/iceberg/spark/source/SparkWrite.java    |  174 +-
 .../iceberg/spark/source/SparkWriteBuilder.java    |   41 +-
 .../iceberg/spark/source/StagedSparkTable.java     |    1 -
 .../org/apache/iceberg/spark/source/Stats.java     |    1 -
 .../iceberg/spark/source/StreamingOffset.java      |   29 +-
 .../iceberg/spark/source/StructInternalRow.java    |   63 +-
 .../analysis/NoSuchProcedureException.java         |    8 +-
 .../iceberg/catalog/ExtendedSupportsDelete.java    |   18 +-
 .../sql/connector/iceberg/catalog/Procedure.java   |   30 +-
 .../iceberg/catalog/ProcedureCatalog.java          |    7 +-
 .../iceberg/catalog/ProcedureParameter.java        |   17 +-
 .../iceberg/catalog/ProcedureParameterImpl.java    |   14 +-
 .../connector/iceberg/catalog/SupportsMerge.java   |   10 +-
 .../distributions/ClusteredDistribution.java       |    5 +-
 .../iceberg/distributions/Distribution.java        |    5 +-
 .../iceberg/distributions/Distributions.java       |   12 +-
 .../iceberg/distributions/OrderedDistribution.java |    9 +-
 .../distributions/UnspecifiedDistribution.java     |    1 -
 .../impl/ClusterDistributionImpl.java              |    2 -
 .../impl/OrderedDistributionImpl.java              |    2 -
 .../impl/UnspecifiedDistributionImpl.java          |    6 +-
 .../iceberg/expressions/NullOrdering.java          |    4 +-
 .../iceberg/expressions/SortDirection.java         |    4 +-
 .../connector/iceberg/expressions/SortOrder.java   |   13 +-
 .../connector/iceberg/read/SupportsFileFilter.java |    1 -
 .../sql/connector/iceberg/write/MergeBuilder.java  |    5 +-
 .../test/java/org/apache/iceberg/KryoHelpers.java  |    7 +-
 .../java/org/apache/iceberg/TaskCheckHelper.java   |   80 +-
 .../apache/iceberg/TestDataFileSerialization.java  |   72 +-
 .../apache/iceberg/TestFileIOSerialization.java    |   31 +-
 .../iceberg/TestManifestFileSerialization.java     |  151 +-
 .../apache/iceberg/TestScanTaskSerialization.java  |   50 +-
 .../org/apache/iceberg/TestTableSerialization.java |   47 +-
 .../apache/iceberg/spark/SparkCatalogConfig.java   |   31 +-
 .../apache/iceberg/spark/SparkCatalogTestBase.java |   38 +-
 .../org/apache/iceberg/spark/SparkTestBase.java    |  134 +-
 .../iceberg/spark/SparkTestBaseWithCatalog.java    |   20 +-
 .../iceberg/spark/TestFileRewriteCoordinator.java  |  117 +-
 .../org/apache/iceberg/spark/TestSpark3Util.java   |  100 +-
 .../iceberg/spark/TestSparkCatalogOperations.java  |    1 -
 .../org/apache/iceberg/spark/TestSparkFilters.java |   25 +-
 .../apache/iceberg/spark/TestSparkSchemaUtil.java  |   23 +-
 .../apache/iceberg/spark/TestSparkTableUtil.java   |    1 -
 .../iceberg/spark/TestSparkValueConverter.java     |   62 +-
 .../iceberg/spark/actions/TestCreateActions.java   |  386 ++-
 .../actions/TestDeleteReachableFilesAction.java    |  328 ++-
 .../spark/actions/TestExpireSnapshotsAction.java   | 1000 ++++----
 .../spark/actions/TestRemoveOrphanFilesAction.java |  461 ++--
 .../actions/TestRemoveOrphanFilesAction3.java      |   66 +-
 .../spark/actions/TestRewriteDataFilesAction.java  |  626 +++--
 .../spark/actions/TestRewriteManifestsAction.java  |  274 +--
 .../apache/iceberg/spark/data/AvroDataTest.java    |  277 ++-
 .../apache/iceberg/spark/data/GenericsHelpers.java |  135 +-
 .../org/apache/iceberg/spark/data/RandomData.java  |  103 +-
 .../org/apache/iceberg/spark/data/TestHelpers.java |  294 ++-
 .../apache/iceberg/spark/data/TestOrcWrite.java    |   24 +-
 .../iceberg/spark/data/TestParquetAvroReader.java  |  156 +-
 .../iceberg/spark/data/TestParquetAvroWriter.java  |   98 +-
 .../iceberg/spark/data/TestSparkAvroEnums.java     |   35 +-
 .../iceberg/spark/data/TestSparkAvroReader.java    |   20 +-
 .../iceberg/spark/data/TestSparkDateTimes.java     |    5 +-
 .../data/TestSparkOrcReadMetadataColumns.java      |   91 +-
 .../iceberg/spark/data/TestSparkOrcReader.java     |   55 +-
 .../data/TestSparkParquetReadMetadataColumns.java  |   98 +-
 .../iceberg/spark/data/TestSparkParquetReader.java |   41 +-
 .../iceberg/spark/data/TestSparkParquetWriter.java |   99 +-
 .../spark/data/TestSparkRecordOrcReaderWriter.java |   67 +-
 ...estParquetDictionaryEncodedVectorizedReads.java |   57 +-
 ...naryFallbackToPlainEncodingVectorizedReads.java |   23 +-
 .../vectorized/TestParquetVectorizedReads.java     |  245 +-
 .../apache/iceberg/spark/source/LogMessage.java    |    1 -
 .../apache/iceberg/spark/source/ManualSource.java  |    7 +-
 .../apache/iceberg/spark/source/SimpleRecord.java  |    4 +-
 .../iceberg/spark/source/SparkTestTable.java       |    1 -
 .../apache/iceberg/spark/source/TestAvroScan.java  |   32 +-
 .../iceberg/spark/source/TestDataFrameWrites.java  |  233 +-
 .../spark/source/TestDataSourceOptions.java        |  299 +--
 .../iceberg/spark/source/TestFilteredScan.java     |  388 +--
 .../spark/source/TestForwardCompatibility.java     |  101 +-
 .../iceberg/spark/source/TestIcebergSource.java    |    2 -
 .../source/TestIcebergSourceHadoopTables.java      |    1 -
 .../spark/source/TestIcebergSourceHiveTables.java  |    4 +-
 .../spark/source/TestIcebergSourceTablesBase.java  | 1489 +++++++-----
 .../iceberg/spark/source/TestIcebergSpark.java     |   83 +-
 .../spark/source/TestIdentityPartitionData.java    |  174 +-
 .../spark/source/TestInternalRowWrapper.java       |    7 +-
 .../TestMetadataTablesWithPartitionEvolution.java  |  175 +-
 .../iceberg/spark/source/TestParquetScan.java      |   49 +-
 .../iceberg/spark/source/TestPartitionPruning.java |  319 ++-
 .../iceberg/spark/source/TestPartitionValues.java  |  316 +--
 .../iceberg/spark/source/TestPathIdentifier.java   |   25 +-
 .../iceberg/spark/source/TestReadProjection.java   |  439 ++--
 .../spark/source/TestSnapshotSelection.java        |  138 +-
 .../spark/source/TestSparkAppenderFactory.java     |    9 +-
 .../spark/source/TestSparkBaseDataReader.java      |   88 +-
 .../iceberg/spark/source/TestSparkCatalog.java     |    4 +-
 .../source/TestSparkCatalogCacheExpiration.java    |  102 +-
 .../source/TestSparkCatalogHadoopOverrides.java    |   71 +-
 .../iceberg/spark/source/TestSparkDataFile.java    |  108 +-
 .../iceberg/spark/source/TestSparkDataWrite.java   |  319 ++-
 .../spark/source/TestSparkFileWriterFactory.java   |    9 +-
 .../iceberg/spark/source/TestSparkFilesScan.java   |   47 +-
 .../spark/source/TestSparkMergingMetrics.java      |   39 +-
 .../spark/source/TestSparkMetadataColumns.java     |  117 +-
 .../spark/source/TestSparkPartitioningWriters.java |    9 +-
 .../source/TestSparkPositionDeltaWriters.java      |    9 +-
 .../spark/source/TestSparkReadProjection.java      |  224 +-
 .../spark/source/TestSparkReaderDeletes.java       |  159 +-
 .../spark/source/TestSparkRollingFileWriters.java  |    9 +-
 .../iceberg/spark/source/TestSparkTable.java       |    1 -
 .../spark/source/TestSparkWriterMetrics.java       |    1 -
 .../iceberg/spark/source/TestStreamingOffset.java  |   20 +-
 .../spark/source/TestStructuredStreaming.java      |  148 +-
 .../spark/source/TestStructuredStreamingRead3.java |  277 +--
 .../apache/iceberg/spark/source/TestTables.java    |   11 +-
 .../spark/source/TestTimestampWithoutZone.java     |  169 +-
 .../spark/source/TestWriteMetricsConfig.java       |   84 +-
 .../iceberg/spark/source/ThreeColumnRecord.java    |   16 +-
 .../apache/iceberg/spark/sql/TestAlterTable.java   |  267 +-
 .../apache/iceberg/spark/sql/TestCreateTable.java  |  218 +-
 .../iceberg/spark/sql/TestCreateTableAsSelect.java |  401 +--
 .../apache/iceberg/spark/sql/TestDeleteFrom.java   |   21 +-
 .../apache/iceberg/spark/sql/TestDropTable.java    |   66 +-
 .../apache/iceberg/spark/sql/TestNamespaceSQL.java |   72 +-
 .../iceberg/spark/sql/TestPartitionedWrites.java   |  101 +-
 .../apache/iceberg/spark/sql/TestRefreshTable.java |    8 +-
 .../org/apache/iceberg/spark/sql/TestSelect.java   |   85 +-
 .../spark/sql/TestTimestampWithoutZone.java        |  168 +-
 .../iceberg/spark/sql/TestUnpartitionedWrites.java |   99 +-
 .../apache/iceberg/spark/extensions/Employee.java  |    4 +-
 .../spark/extensions/SparkExtensionsTestBase.java  |   38 +-
 .../SparkRowLevelOperationsTestBase.java           |  189 +-
 .../spark/extensions/TestAddFilesProcedure.java    |  583 +++--
 .../extensions/TestAlterTablePartitionFields.java  |  250 +-
 .../spark/extensions/TestAlterTableSchema.java     |   72 +-
 .../spark/extensions/TestAncestorsOfProcedure.java |   37 +-
 .../spark/extensions/TestCallStatementParser.java  |   92 +-
 .../TestCherrypickSnapshotProcedure.java           |   85 +-
 .../spark/extensions/TestConflictValidation.java   |  133 +-
 .../spark/extensions/TestCopyOnWriteDelete.java    |   10 +-
 .../spark/extensions/TestCopyOnWriteMerge.java     |   10 +-
 .../spark/extensions/TestCopyOnWriteUpdate.java    |   10 +-
 .../iceberg/spark/extensions/TestDelete.java       |  433 ++--
 .../spark/extensions/TestDuplicateSnapshotIDs.java |   47 +-
 .../extensions/TestExpireSnapshotsProcedure.java   |  275 ++-
 .../spark/extensions/TestIcebergExpressions.java   |   34 +-
 .../apache/iceberg/spark/extensions/TestMerge.java | 2568 +++++++++++---------
 .../spark/extensions/TestMergeOnReadDelete.java    |   61 +-
 .../spark/extensions/TestMergeOnReadMerge.java     |   13 +-
 .../spark/extensions/TestMergeOnReadUpdate.java    |   13 +-
 .../spark/extensions/TestMetadataTables.java       |  297 ++-
 .../extensions/TestMigrateTableProcedure.java      |   64 +-
 .../extensions/TestPublishChangesProcedure.java    |   77 +-
 .../extensions/TestRegisterTableProcedure.java     |   31 +-
 .../extensions/TestRemoveOrphanFilesProcedure.java |  249 +-
 .../TestRequiredDistributionAndOrdering.java       |  257 +-
 .../extensions/TestRewriteDataFilesProcedure.java  |  420 ++--
 .../extensions/TestRewriteManifestsProcedure.java  |  130 +-
 .../TestRollbackToSnapshotProcedure.java           |  141 +-
 .../TestRollbackToTimestampProcedure.java          |  140 +-
 .../TestSetCurrentSnapshotProcedure.java           |  131 +-
 .../TestSetWriteDistributionAndOrdering.java       |  130 +-
 .../extensions/TestSnapshotTableProcedure.java     |  132 +-
 .../iceberg/spark/extensions/TestUpdate.java       |  745 +++---
 .../apache/iceberg/spark/SparkBenchmarkUtil.java   |    7 +-
 .../action/IcebergSortCompactionBenchmark.java     |  203 +-
 .../iceberg/spark/action/RandomGeneratingUDF.java  |   16 +-
 .../SparkParquetReadersFlatDataBenchmark.java      |  136 +-
 .../SparkParquetReadersNestedDataBenchmark.java    |  138 +-
 .../SparkParquetWritersFlatDataBenchmark.java      |   59 +-
 .../SparkParquetWritersNestedDataBenchmark.java    |   62 +-
 .../org/apache/iceberg/spark/source/Action.java    |    1 -
 .../spark/source/IcebergSourceBenchmark.java       |   93 +-
 .../spark/source/IcebergSourceDeleteBenchmark.java |  152 +-
 .../source/IcebergSourceFlatDataBenchmark.java     |   26 +-
 .../source/IcebergSourceNestedDataBenchmark.java   |   25 +-
 .../IcebergSourceNestedListDataBenchmark.java      |   26 +-
 .../iceberg/spark/source/WritersBenchmark.java     |  166 +-
 .../spark/source/avro/AvroWritersBenchmark.java    |    6 +-
 .../IcebergSourceFlatAvroDataReadBenchmark.java    |   98 +-
 .../IcebergSourceNestedAvroDataReadBenchmark.java  |   98 +-
 .../orc/IcebergSourceFlatORCDataBenchmark.java     |   37 +-
 .../orc/IcebergSourceFlatORCDataReadBenchmark.java |  154 +-
 ...ebergSourceNestedListORCDataWriteBenchmark.java |   54 +-
 .../IcebergSourceNestedORCDataReadBenchmark.java   |  134 +-
 ...cebergSourceFlatParquetDataFilterBenchmark.java |   71 +-
 .../IcebergSourceFlatParquetDataReadBenchmark.java |  102 +-
 ...IcebergSourceFlatParquetDataWriteBenchmark.java |   15 +-
 ...gSourceNestedListParquetDataWriteBenchmark.java |   28 +-
 ...bergSourceNestedParquetDataFilterBenchmark.java |   71 +-
 ...cebergSourceNestedParquetDataReadBenchmark.java |  100 +-
 ...ebergSourceNestedParquetDataWriteBenchmark.java |   20 +-
 .../IcebergSourceParquetEqDeleteBenchmark.java     |   11 +-
 ...ebergSourceParquetMultiDeleteFileBenchmark.java |   11 +-
 .../IcebergSourceParquetPosDeleteBenchmark.java    |   11 +-
 ...gSourceParquetWithUnrelatedDeleteBenchmark.java |   20 +-
 .../source/parquet/ParquetWritersBenchmark.java    |    4 +-
 ...dDictionaryEncodedFlatParquetDataBenchmark.java |   38 +-
 .../VectorizedReadFlatParquetDataBenchmark.java    |  265 +-
 .../java/org/apache/iceberg/spark/BaseCatalog.java |    7 +-
 .../org/apache/iceberg/spark/CommitMetadata.java   |   17 +-
 .../org/apache/iceberg/spark/ExtendedParser.java   |    7 +-
 .../iceberg/spark/FileRewriteCoordinator.java      |   20 +-
 .../iceberg/spark/FileScanTaskSetManager.java      |    7 +-
 .../org/apache/iceberg/spark/IcebergSpark.java     |   26 +-
 .../org/apache/iceberg/spark/JobGroupInfo.java     |    6 +-
 .../org/apache/iceberg/spark/JobGroupUtils.java    |   10 +-
 .../org/apache/iceberg/spark/PathIdentifier.java   |    8 +-
 .../iceberg/spark/PruneColumnsWithReordering.java  |   89 +-
 .../spark/PruneColumnsWithoutReordering.java       |   72 +-
 .../apache/iceberg/spark/RollbackStagedTable.java  |   51 +-
 .../org/apache/iceberg/spark/SortOrderToSpark.java |   37 +-
 .../java/org/apache/iceberg/spark/Spark3Util.java  |  431 ++--
 .../iceberg/spark/SparkCachedTableCatalog.java     |   19 +-
 .../org/apache/iceberg/spark/SparkCatalog.java     |  210 +-
 .../org/apache/iceberg/spark/SparkConfParser.java  |    4 +-
 .../org/apache/iceberg/spark/SparkDataFile.java    |   25 +-
 .../spark/SparkDistributionAndOrderingUtil.java    |   93 +-
 .../apache/iceberg/spark/SparkExceptionUtil.java   |    8 +-
 .../org/apache/iceberg/spark/SparkFilters.java     |  140 +-
 .../iceberg/spark/SparkFixupTimestampType.java     |   12 +-
 .../org/apache/iceberg/spark/SparkFixupTypes.java  |    9 +-
 .../org/apache/iceberg/spark/SparkReadConf.java    |  109 +-
 .../org/apache/iceberg/spark/SparkReadOptions.java |   14 +-
 .../apache/iceberg/spark/SparkSQLProperties.java   |    7 +-
 .../org/apache/iceberg/spark/SparkSchemaUtil.java  |  166 +-
 .../apache/iceberg/spark/SparkSessionCatalog.java  |   75 +-
 .../org/apache/iceberg/spark/SparkStructLike.java  |    1 -
 .../org/apache/iceberg/spark/SparkTableCache.java  |    1 -
 .../org/apache/iceberg/spark/SparkTableUtil.java   |  451 ++--
 .../org/apache/iceberg/spark/SparkTypeToType.java  |   23 +-
 .../org/apache/iceberg/spark/SparkTypeVisitor.java |   15 +-
 .../java/org/apache/iceberg/spark/SparkUtil.java   |  175 +-
 .../apache/iceberg/spark/SparkValueConverter.java  |    8 +-
 .../org/apache/iceberg/spark/SparkWriteConf.java   |  177 +-
 .../apache/iceberg/spark/SparkWriteOptions.java    |   17 +-
 .../org/apache/iceberg/spark/TypeToSparkType.java  |   11 +-
 .../actions/BaseDeleteOrphanFilesSparkAction.java  |    5 +-
 .../BaseDeleteReachableFilesSparkAction.java       |    5 +-
 .../actions/BaseExpireSnapshotsSparkAction.java    |    5 +-
 .../spark/actions/BaseMigrateTableSparkAction.java |    8 +-
 .../actions/BaseRewriteDataFilesSparkAction.java   |    5 +-
 .../actions/BaseRewriteManifestsSparkAction.java   |    5 +-
 .../actions/BaseSnapshotTableSparkAction.java      |    8 +-
 .../actions/BaseSnapshotUpdateSparkAction.java     |    1 -
 .../iceberg/spark/actions/BaseSparkAction.java     |   63 +-
 .../actions/BaseTableCreationSparkAction.java      |   50 +-
 .../actions/DeleteOrphanFilesSparkAction.java      |  174 +-
 .../actions/DeleteReachableFilesSparkAction.java   |   98 +-
 .../spark/actions/ExpireSnapshotsSparkAction.java  |  158 +-
 .../iceberg/spark/actions/ManifestFileBean.java    |    1 -
 .../spark/actions/MigrateTableSparkAction.java     |   41 +-
 .../spark/actions/RewriteDataFilesSparkAction.java |  339 +--
 .../spark/actions/RewriteManifestsSparkAction.java |  146 +-
 .../spark/actions/SnapshotTableSparkAction.java    |   56 +-
 .../apache/iceberg/spark/actions/SparkActions.java |   19 +-
 .../spark/actions/SparkBinPackStrategy.java        |   23 +-
 .../iceberg/spark/actions/SparkSortStrategy.java   |   51 +-
 .../iceberg/spark/actions/SparkZOrderStrategy.java |  128 +-
 .../iceberg/spark/actions/SparkZOrderUDF.java      |  177 +-
 .../spark/data/AvroWithSparkSchemaVisitor.java     |   10 +-
 .../spark/data/ParquetWithSparkSchemaVisitor.java  |   83 +-
 .../apache/iceberg/spark/data/SparkAvroReader.java |   21 +-
 .../apache/iceberg/spark/data/SparkAvroWriter.java |   35 +-
 .../apache/iceberg/spark/data/SparkOrcReader.java  |   27 +-
 .../iceberg/spark/data/SparkOrcValueReaders.java   |   53 +-
 .../iceberg/spark/data/SparkOrcValueWriters.java   |   42 +-
 .../apache/iceberg/spark/data/SparkOrcWriter.java  |   46 +-
 .../iceberg/spark/data/SparkParquetReaders.java    |   89 +-
 .../iceberg/spark/data/SparkParquetWriters.java    |  104 +-
 .../iceberg/spark/data/SparkValueReaders.java      |   44 +-
 .../iceberg/spark/data/SparkValueWriters.java      |   42 +-
 .../vectorized/ArrowVectorAccessorFactory.java     |   21 +-
 .../data/vectorized/ArrowVectorAccessors.java      |    4 +-
 .../data/vectorized/ColumnVectorWithFilter.java    |    8 +-
 .../spark/data/vectorized/ColumnarBatchReader.java |   52 +-
 .../data/vectorized/ConstantColumnVector.java      |    4 +-
 .../data/vectorized/IcebergArrowColumnVector.java  |   18 +-
 .../data/vectorized/RowPositionColumnVector.java   |    4 +-
 .../data/vectorized/VectorizedSparkOrcReaders.java |  108 +-
 .../vectorized/VectorizedSparkParquetReaders.java  |   55 +-
 .../spark/procedures/AddFilesProcedure.java        |  198 +-
 .../spark/procedures/AncestorsOfProcedure.java     |   26 +-
 .../iceberg/spark/procedures/BaseProcedure.java    |   72 +-
 .../procedures/CherrypickSnapshotProcedure.java    |   45 +-
 .../spark/procedures/ExpireSnapshotsProcedure.java |   94 +-
 .../spark/procedures/MigrateTableProcedure.java    |   37 +-
 .../spark/procedures/PublishChangesProcedure.java  |   61 +-
 .../spark/procedures/RegisterTableProcedure.java   |   38 +-
 .../procedures/RemoveOrphanFilesProcedure.java     |   93 +-
 .../procedures/RewriteDataFilesProcedure.java      |  114 +-
 .../procedures/RewriteManifestsProcedure.java      |   46 +-
 .../procedures/RollbackToSnapshotProcedure.java    |   41 +-
 .../procedures/RollbackToTimestampProcedure.java   |   44 +-
 .../procedures/SetCurrentSnapshotProcedure.java    |   43 +-
 .../spark/procedures/SnapshotTableProcedure.java   |   46 +-
 .../iceberg/spark/procedures/SparkProcedures.java  |    5 +-
 .../iceberg/spark/source/BaseDataReader.java       |   16 +-
 .../iceberg/spark/source/BatchDataReader.java      |   70 +-
 .../spark/source/EqualityDeleteRowReader.java      |    4 +-
 .../iceberg/spark/source/HasIcebergCatalog.java    |    1 -
 .../apache/iceberg/spark/source/IcebergSource.java |   69 +-
 .../iceberg/spark/source/InternalRowWrapper.java   |   13 +-
 .../apache/iceberg/spark/source/RowDataReader.java |   72 +-
 .../iceberg/spark/source/RowDataRewriter.java      |   81 +-
 .../spark/source/SerializableTableWithSize.java    |    7 +-
 .../iceberg/spark/source/SparkAppenderFactory.java |   67 +-
 .../apache/iceberg/spark/source/SparkBatch.java    |   37 +-
 .../iceberg/spark/source/SparkBatchQueryScan.java  |   81 +-
 .../spark/source/SparkCopyOnWriteOperation.java    |   32 +-
 .../iceberg/spark/source/SparkCopyOnWriteScan.java |   61 +-
 .../spark/source/SparkFileWriterFactory.java       |   82 +-
 .../iceberg/spark/source/SparkFilesScan.java       |   29 +-
 .../spark/source/SparkFilesScanBuilder.java        |    1 -
 .../iceberg/spark/source/SparkMetadataColumn.java  |    1 -
 .../spark/source/SparkMicroBatchStream.java        |   94 +-
 .../spark/source/SparkPartitionedFanoutWriter.java |   12 +-
 .../spark/source/SparkPartitionedWriter.java       |   14 +-
 .../spark/source/SparkPositionDeltaOperation.java  |   32 +-
 .../spark/source/SparkPositionDeltaWrite.java      |  199 +-
 .../source/SparkPositionDeltaWriteBuilder.java     |   49 +-
 .../source/SparkRowLevelOperationBuilder.java      |   21 +-
 .../org/apache/iceberg/spark/source/SparkScan.java |   43 +-
 .../iceberg/spark/source/SparkScanBuilder.java     |  128 +-
 .../apache/iceberg/spark/source/SparkTable.java    |  128 +-
 .../apache/iceberg/spark/source/SparkWrite.java    |  169 +-
 .../iceberg/spark/source/SparkWriteBuilder.java    |   54 +-
 .../iceberg/spark/source/StagedSparkTable.java     |    1 -
 .../org/apache/iceberg/spark/source/Stats.java     |    1 -
 .../iceberg/spark/source/StreamingOffset.java      |   29 +-
 .../iceberg/spark/source/StructInternalRow.java    |   63 +-
 .../analysis/NoSuchProcedureException.java         |   11 +-
 .../sql/connector/iceberg/catalog/Procedure.java   |   30 +-
 .../iceberg/catalog/ProcedureCatalog.java          |    7 +-
 .../iceberg/catalog/ProcedureParameter.java        |   17 +-
 .../iceberg/catalog/ProcedureParameterImpl.java    |   14 +-
 .../catalog/SupportsRowLevelOperations.java        |    9 +-
 .../connector/iceberg/write/DeltaBatchWrite.java   |    5 +-
 .../sql/connector/iceberg/write/DeltaWrite.java    |    9 +-
 .../connector/iceberg/write/DeltaWriteBuilder.java |    9 +-
 .../sql/connector/iceberg/write/DeltaWriter.java   |    5 +-
 .../iceberg/write/DeltaWriterFactory.java          |    5 +-
 .../iceberg/write/ExtendedLogicalWriteInfo.java    |   13 +-
 .../connector/iceberg/write/RowLevelOperation.java |   56 +-
 .../iceberg/write/RowLevelOperationBuilder.java    |    8 +-
 .../iceberg/write/RowLevelOperationInfo.java       |   13 +-
 .../sql/connector/iceberg/write/SupportsDelta.java |    9 +-
 .../test/java/org/apache/iceberg/KryoHelpers.java  |    7 +-
 .../java/org/apache/iceberg/TaskCheckHelper.java   |   86 +-
 .../apache/iceberg/TestDataFileSerialization.java  |   72 +-
 .../apache/iceberg/TestFileIOSerialization.java    |   31 +-
 .../TestHadoopMetricsContextSerialization.java     |    4 +-
 .../iceberg/TestManifestFileSerialization.java     |  151 +-
 .../apache/iceberg/TestScanTaskSerialization.java  |   61 +-
 .../org/apache/iceberg/TestTableSerialization.java |   47 +-
 .../apache/iceberg/spark/SparkCatalogConfig.java   |   31 +-
 .../apache/iceberg/spark/SparkCatalogTestBase.java |   37 +-
 .../org/apache/iceberg/spark/SparkTestBase.java    |  107 +-
 .../iceberg/spark/SparkTestBaseWithCatalog.java    |   20 +-
 .../iceberg/spark/TestFileRewriteCoordinator.java  |  117 +-
 .../org/apache/iceberg/spark/TestSpark3Util.java   |  103 +-
 .../iceberg/spark/TestSparkCachedTableCatalog.java |   18 +-
 .../iceberg/spark/TestSparkCatalogOperations.java  |    1 -
 .../TestSparkDistributionAndOrderingUtil.java      | 1812 +++++++-------
 .../org/apache/iceberg/spark/TestSparkFilters.java |  133 +-
 .../apache/iceberg/spark/TestSparkSchemaUtil.java  |   48 +-
 .../iceberg/spark/TestSparkSessionCatalog.java     |   45 +-
 .../apache/iceberg/spark/TestSparkTableUtil.java   |   45 +-
 .../iceberg/spark/TestSparkValueConverter.java     |   62 +-
 .../iceberg/spark/actions/TestCreateActions.java   |  490 ++--
 .../actions/TestDeleteReachableFilesAction.java    |  349 ++-
 .../spark/actions/TestExpireSnapshotsAction.java   | 1107 +++++----
 .../spark/actions/TestRemoveOrphanFilesAction.java |  604 ++---
 .../actions/TestRemoveOrphanFilesAction3.java      |   66 +-
 .../spark/actions/TestRewriteDataFilesAction.java  |  709 +++---
 .../spark/actions/TestRewriteManifestsAction.java  |  274 +--
 .../apache/iceberg/spark/data/AvroDataTest.java    |  277 ++-
 .../apache/iceberg/spark/data/GenericsHelpers.java |  135 +-
 .../org/apache/iceberg/spark/data/RandomData.java  |  103 +-
 .../org/apache/iceberg/spark/data/TestHelpers.java |  297 ++-
 .../apache/iceberg/spark/data/TestOrcWrite.java    |   24 +-
 .../iceberg/spark/data/TestParquetAvroReader.java  |  156 +-
 .../iceberg/spark/data/TestParquetAvroWriter.java  |   98 +-
 .../iceberg/spark/data/TestSparkAvroEnums.java     |   35 +-
 .../iceberg/spark/data/TestSparkAvroReader.java    |   20 +-
 .../iceberg/spark/data/TestSparkDateTimes.java     |    5 +-
 .../data/TestSparkOrcReadMetadataColumns.java      |   91 +-
 .../iceberg/spark/data/TestSparkOrcReader.java     |   55 +-
 .../data/TestSparkParquetReadMetadataColumns.java  |  121 +-
 .../iceberg/spark/data/TestSparkParquetReader.java |   41 +-
 .../iceberg/spark/data/TestSparkParquetWriter.java |   99 +-
 .../spark/data/TestSparkRecordOrcReaderWriter.java |   67 +-
 ...estParquetDictionaryEncodedVectorizedReads.java |   57 +-
 ...naryFallbackToPlainEncodingVectorizedReads.java |   23 +-
 .../vectorized/TestParquetVectorizedReads.java     |  245 +-
 .../spark/source/FilePathLastModifiedRecord.java   |   20 +-
 .../apache/iceberg/spark/source/LogMessage.java    |    1 -
 .../apache/iceberg/spark/source/ManualSource.java  |    7 +-
 .../apache/iceberg/spark/source/SimpleRecord.java  |    4 +-
 .../apache/iceberg/spark/source/TestAvroScan.java  |   32 +-
 .../spark/source/TestDataFrameWriterV2.java        |  126 +-
 .../iceberg/spark/source/TestDataFrameWrites.java  |  233 +-
 .../spark/source/TestDataSourceOptions.java        |  299 +--
 .../iceberg/spark/source/TestFilteredScan.java     |  388 +--
 .../spark/source/TestForwardCompatibility.java     |  101 +-
 .../iceberg/spark/source/TestIcebergSource.java    |    2 -
 .../source/TestIcebergSourceHadoopTables.java      |    1 -
 .../spark/source/TestIcebergSourceHiveTables.java  |    4 +-
 .../spark/source/TestIcebergSourceTablesBase.java  | 1503 +++++++-----
 .../iceberg/spark/source/TestIcebergSpark.java     |  103 +-
 .../spark/source/TestIdentityPartitionData.java    |  174 +-
 .../spark/source/TestInternalRowWrapper.java       |    7 +-
 .../TestMetadataTablesWithPartitionEvolution.java  |  425 ++--
 .../iceberg/spark/source/TestParquetScan.java      |   49 +-
 .../iceberg/spark/source/TestPartitionPruning.java |  319 ++-
 .../iceberg/spark/source/TestPartitionValues.java  |  312 +--
 .../iceberg/spark/source/TestPathIdentifier.java   |   25 +-
 .../iceberg/spark/source/TestReadProjection.java   |  439 ++--
 .../TestRequiredDistributionAndOrdering.java       |  312 +--
 .../iceberg/spark/source/TestRuntimeFiltering.java |  359 +--
 .../spark/source/TestSnapshotSelection.java        |  138 +-
 .../spark/source/TestSparkAppenderFactory.java     |    9 +-
 .../spark/source/TestSparkBaseDataReader.java      |   88 +-
 .../iceberg/spark/source/TestSparkCatalog.java     |    7 +-
 .../source/TestSparkCatalogCacheExpiration.java    |  102 +-
 .../source/TestSparkCatalogHadoopOverrides.java    |   71 +-
 .../iceberg/spark/source/TestSparkDataFile.java    |  108 +-
 .../iceberg/spark/source/TestSparkDataWrite.java   |  319 ++-
 .../spark/source/TestSparkFileWriterFactory.java   |    9 +-
 .../iceberg/spark/source/TestSparkFilesScan.java   |   47 +-
 .../spark/source/TestSparkMergingMetrics.java      |   39 +-
 .../spark/source/TestSparkMetadataColumns.java     |  184 +-
 .../spark/source/TestSparkPartitioningWriters.java |    9 +-
 .../source/TestSparkPositionDeltaWriters.java      |    9 +-
 .../spark/source/TestSparkReadProjection.java      |  224 +-
 .../spark/source/TestSparkReaderDeletes.java       |  405 +--
 .../source/TestSparkReaderWithBloomFilter.java     |  247 +-
 .../spark/source/TestSparkRollingFileWriters.java  |    9 +-
 .../iceberg/spark/source/TestSparkTable.java       |    1 -
 .../spark/source/TestSparkWriterMetrics.java       |    1 -
 .../iceberg/spark/source/TestStreamingOffset.java  |   20 +-
 .../spark/source/TestStructuredStreaming.java      |  148 +-
 .../spark/source/TestStructuredStreamingRead3.java |  277 +--
 .../apache/iceberg/spark/source/TestTables.java    |   11 +-
 .../spark/source/TestTimestampWithoutZone.java     |  169 +-
 .../spark/source/TestWriteMetricsConfig.java       |   84 +-
 .../iceberg/spark/source/ThreeColumnRecord.java    |   16 +-
 .../apache/iceberg/spark/sql/TestAlterTable.java   |  267 +-
 .../apache/iceberg/spark/sql/TestCreateTable.java  |  218 +-
 .../iceberg/spark/sql/TestCreateTableAsSelect.java |  401 +--
 .../apache/iceberg/spark/sql/TestDeleteFrom.java   |   77 +-
 .../apache/iceberg/spark/sql/TestDropTable.java    |   55 +-
 .../apache/iceberg/spark/sql/TestNamespaceSQL.java |   72 +-
 .../iceberg/spark/sql/TestPartitionedWrites.java   |  101 +-
 .../spark/sql/TestPartitionedWritesAsSelect.java   |   99 +-
 .../apache/iceberg/spark/sql/TestRefreshTable.java |    8 +-
 .../org/apache/iceberg/spark/sql/TestSelect.java   |  109 +-
 .../spark/sql/TestTimestampWithoutZone.java        |  168 +-
 .../iceberg/spark/sql/TestUnpartitionedWrites.java |   99 +-
 .../apache/iceberg/spark/extensions/Employee.java  |    4 +-
 .../spark/extensions/SparkExtensionsTestBase.java  |   40 +-
 .../SparkRowLevelOperationsTestBase.java           |  189 +-
 .../spark/extensions/TestAddFilesProcedure.java    |  583 +++--
 .../extensions/TestAlterTablePartitionFields.java  |  250 +-
 .../spark/extensions/TestAlterTableSchema.java     |   72 +-
 .../spark/extensions/TestAncestorsOfProcedure.java |   37 +-
 .../spark/extensions/TestCallStatementParser.java  |   92 +-
 .../TestCherrypickSnapshotProcedure.java           |   85 +-
 .../spark/extensions/TestConflictValidation.java   |  133 +-
 .../spark/extensions/TestCopyOnWriteDelete.java    |   10 +-
 .../spark/extensions/TestCopyOnWriteMerge.java     |   10 +-
 .../spark/extensions/TestCopyOnWriteUpdate.java    |   10 +-
 .../iceberg/spark/extensions/TestDelete.java       |  433 ++--
 .../extensions/TestExpireSnapshotsProcedure.java   |  275 ++-
 .../spark/extensions/TestIcebergExpressions.java   |   34 +-
 .../apache/iceberg/spark/extensions/TestMerge.java | 2568 +++++++++++---------
 .../spark/extensions/TestMergeOnReadDelete.java    |   61 +-
 .../spark/extensions/TestMergeOnReadMerge.java     |   13 +-
 .../spark/extensions/TestMergeOnReadUpdate.java    |   13 +-
 .../spark/extensions/TestMetadataTables.java       |  297 ++-
 .../extensions/TestMigrateTableProcedure.java      |   64 +-
 .../extensions/TestPublishChangesProcedure.java    |   77 +-
 .../extensions/TestRegisterTableProcedure.java     |   31 +-
 .../extensions/TestRemoveOrphanFilesProcedure.java |  249 +-
 .../TestRequiredDistributionAndOrdering.java       |  257 +-
 .../extensions/TestRewriteDataFilesProcedure.java  |  420 ++--
 .../extensions/TestRewriteManifestsProcedure.java  |  130 +-
 .../TestRollbackToSnapshotProcedure.java           |  141 +-
 .../TestRollbackToTimestampProcedure.java          |  140 +-
 .../TestSetCurrentSnapshotProcedure.java           |  131 +-
 .../TestSetWriteDistributionAndOrdering.java       |  130 +-
 .../extensions/TestSnapshotTableProcedure.java     |  132 +-
 .../iceberg/spark/extensions/TestUpdate.java       |  745 +++---
 .../apache/iceberg/spark/SparkBenchmarkUtil.java   |    7 +-
 .../action/IcebergSortCompactionBenchmark.java     |  203 +-
 .../iceberg/spark/action/RandomGeneratingUDF.java  |   16 +-
 .../SparkParquetReadersFlatDataBenchmark.java      |  136 +-
 .../SparkParquetReadersNestedDataBenchmark.java    |  138 +-
 .../SparkParquetWritersFlatDataBenchmark.java      |   59 +-
 .../SparkParquetWritersNestedDataBenchmark.java    |   62 +-
 .../org/apache/iceberg/spark/source/Action.java    |    1 -
 .../spark/source/IcebergSourceBenchmark.java       |   93 +-
 .../spark/source/IcebergSourceDeleteBenchmark.java |  152 +-
 .../source/IcebergSourceFlatDataBenchmark.java     |   26 +-
 .../source/IcebergSourceNestedDataBenchmark.java   |   25 +-
 .../IcebergSourceNestedListDataBenchmark.java      |   26 +-
 .../iceberg/spark/source/WritersBenchmark.java     |  166 +-
 .../spark/source/avro/AvroWritersBenchmark.java    |    6 +-
 .../IcebergSourceFlatAvroDataReadBenchmark.java    |   98 +-
 .../IcebergSourceNestedAvroDataReadBenchmark.java  |   98 +-
 .../orc/IcebergSourceFlatORCDataBenchmark.java     |   37 +-
 .../orc/IcebergSourceFlatORCDataReadBenchmark.java |  154 +-
 ...ebergSourceNestedListORCDataWriteBenchmark.java |   54 +-
 .../IcebergSourceNestedORCDataReadBenchmark.java   |  134 +-
 ...cebergSourceFlatParquetDataFilterBenchmark.java |   71 +-
 .../IcebergSourceFlatParquetDataReadBenchmark.java |  102 +-
 ...IcebergSourceFlatParquetDataWriteBenchmark.java |   15 +-
 ...gSourceNestedListParquetDataWriteBenchmark.java |   28 +-
 ...bergSourceNestedParquetDataFilterBenchmark.java |   71 +-
 ...cebergSourceNestedParquetDataReadBenchmark.java |  100 +-
 ...ebergSourceNestedParquetDataWriteBenchmark.java |   20 +-
 .../IcebergSourceParquetEqDeleteBenchmark.java     |   11 +-
 ...ebergSourceParquetMultiDeleteFileBenchmark.java |   11 +-
 .../IcebergSourceParquetPosDeleteBenchmark.java    |   11 +-
 ...gSourceParquetWithUnrelatedDeleteBenchmark.java |   20 +-
 .../source/parquet/ParquetWritersBenchmark.java    |    4 +-
 ...dDictionaryEncodedFlatParquetDataBenchmark.java |   38 +-
 .../VectorizedReadFlatParquetDataBenchmark.java    |  293 ++-
 .../java/org/apache/iceberg/spark/BaseCatalog.java |    7 +-
 .../org/apache/iceberg/spark/CommitMetadata.java   |   17 +-
 .../org/apache/iceberg/spark/ExtendedParser.java   |    7 +-
 .../iceberg/spark/FileRewriteCoordinator.java      |   20 +-
 .../iceberg/spark/FileScanTaskSetManager.java      |    7 +-
 .../org/apache/iceberg/spark/IcebergSpark.java     |   26 +-
 .../org/apache/iceberg/spark/JobGroupInfo.java     |    6 +-
 .../org/apache/iceberg/spark/JobGroupUtils.java    |   10 +-
 .../org/apache/iceberg/spark/PathIdentifier.java   |    8 +-
 .../iceberg/spark/PruneColumnsWithReordering.java  |   89 +-
 .../spark/PruneColumnsWithoutReordering.java       |   72 +-
 .../apache/iceberg/spark/RollbackStagedTable.java  |   51 +-
 .../org/apache/iceberg/spark/SortOrderToSpark.java |   37 +-
 .../java/org/apache/iceberg/spark/Spark3Util.java  |  431 ++--
 .../iceberg/spark/SparkCachedTableCatalog.java     |   27 +-
 .../org/apache/iceberg/spark/SparkCatalog.java     |  222 +-
 .../org/apache/iceberg/spark/SparkConfParser.java  |    4 +-
 .../org/apache/iceberg/spark/SparkDataFile.java    |   25 +-
 .../spark/SparkDistributionAndOrderingUtil.java    |   93 +-
 .../apache/iceberg/spark/SparkExceptionUtil.java   |    8 +-
 .../org/apache/iceberg/spark/SparkFilters.java     |  140 +-
 .../iceberg/spark/SparkFixupTimestampType.java     |   12 +-
 .../org/apache/iceberg/spark/SparkFixupTypes.java  |    9 +-
 .../org/apache/iceberg/spark/SparkReadConf.java    |  109 +-
 .../org/apache/iceberg/spark/SparkReadOptions.java |   14 +-
 .../apache/iceberg/spark/SparkSQLProperties.java   |    7 +-
 .../org/apache/iceberg/spark/SparkSchemaUtil.java  |  166 +-
 .../apache/iceberg/spark/SparkSessionCatalog.java  |   75 +-
 .../org/apache/iceberg/spark/SparkStructLike.java  |    1 -
 .../org/apache/iceberg/spark/SparkTableCache.java  |    1 -
 .../org/apache/iceberg/spark/SparkTableUtil.java   |  451 ++--
 .../org/apache/iceberg/spark/SparkTypeToType.java  |   23 +-
 .../org/apache/iceberg/spark/SparkTypeVisitor.java |   15 +-
 .../java/org/apache/iceberg/spark/SparkUtil.java   |  175 +-
 .../apache/iceberg/spark/SparkValueConverter.java  |    8 +-
 .../org/apache/iceberg/spark/SparkWriteConf.java   |  177 +-
 .../apache/iceberg/spark/SparkWriteOptions.java    |   17 +-
 .../org/apache/iceberg/spark/TypeToSparkType.java  |   11 +-
 .../actions/BaseSnapshotUpdateSparkAction.java     |    1 -
 .../iceberg/spark/actions/BaseSparkAction.java     |   63 +-
 .../actions/BaseTableCreationSparkAction.java      |   50 +-
 .../actions/DeleteOrphanFilesSparkAction.java      |  174 +-
 .../actions/DeleteReachableFilesSparkAction.java   |   98 +-
 .../spark/actions/ExpireSnapshotsSparkAction.java  |  158 +-
 .../iceberg/spark/actions/ManifestFileBean.java    |    1 -
 .../spark/actions/MigrateTableSparkAction.java     |   41 +-
 .../spark/actions/RewriteDataFilesSparkAction.java |  339 +--
 .../spark/actions/RewriteManifestsSparkAction.java |  146 +-
 .../spark/actions/SnapshotTableSparkAction.java    |   56 +-
 .../apache/iceberg/spark/actions/SparkActions.java |   19 +-
 .../spark/actions/SparkBinPackStrategy.java        |   23 +-
 .../iceberg/spark/actions/SparkSortStrategy.java   |   51 +-
 .../iceberg/spark/actions/SparkZOrderStrategy.java |  128 +-
 .../iceberg/spark/actions/SparkZOrderUDF.java      |  177 +-
 .../spark/data/AvroWithSparkSchemaVisitor.java     |   10 +-
 .../spark/data/ParquetWithSparkSchemaVisitor.java  |   83 +-
 .../apache/iceberg/spark/data/SparkAvroReader.java |   21 +-
 .../apache/iceberg/spark/data/SparkAvroWriter.java |   35 +-
 .../apache/iceberg/spark/data/SparkOrcReader.java  |   27 +-
 .../iceberg/spark/data/SparkOrcValueReaders.java   |   53 +-
 .../iceberg/spark/data/SparkOrcValueWriters.java   |   42 +-
 .../apache/iceberg/spark/data/SparkOrcWriter.java  |   46 +-
 .../iceberg/spark/data/SparkParquetReaders.java    |   89 +-
 .../iceberg/spark/data/SparkParquetWriters.java    |  104 +-
 .../iceberg/spark/data/SparkValueReaders.java      |   44 +-
 .../iceberg/spark/data/SparkValueWriters.java      |   42 +-
 .../vectorized/ArrowVectorAccessorFactory.java     |   21 +-
 .../data/vectorized/ArrowVectorAccessors.java      |    4 +-
 .../data/vectorized/ColumnVectorWithFilter.java    |    8 +-
 .../spark/data/vectorized/ColumnarBatchReader.java |   52 +-
 .../data/vectorized/ConstantColumnVector.java      |    4 +-
 .../data/vectorized/IcebergArrowColumnVector.java  |   18 +-
 .../data/vectorized/RowPositionColumnVector.java   |    4 +-
 .../data/vectorized/VectorizedSparkOrcReaders.java |  108 +-
 .../vectorized/VectorizedSparkParquetReaders.java  |   55 +-
 .../spark/procedures/AddFilesProcedure.java        |  198 +-
 .../spark/procedures/AncestorsOfProcedure.java     |   26 +-
 .../iceberg/spark/procedures/BaseProcedure.java    |   72 +-
 .../procedures/CherrypickSnapshotProcedure.java    |   45 +-
 .../spark/procedures/ExpireSnapshotsProcedure.java |   94 +-
 .../spark/procedures/MigrateTableProcedure.java    |   37 +-
 .../spark/procedures/PublishChangesProcedure.java  |   61 +-
 .../spark/procedures/RegisterTableProcedure.java   |   38 +-
 .../procedures/RemoveOrphanFilesProcedure.java     |   93 +-
 .../procedures/RewriteDataFilesProcedure.java      |  114 +-
 .../procedures/RewriteManifestsProcedure.java      |   46 +-
 .../procedures/RollbackToSnapshotProcedure.java    |   41 +-
 .../procedures/RollbackToTimestampProcedure.java   |   44 +-
 .../procedures/SetCurrentSnapshotProcedure.java    |   43 +-
 .../spark/procedures/SnapshotTableProcedure.java   |   46 +-
 .../iceberg/spark/procedures/SparkProcedures.java  |    5 +-
 .../iceberg/spark/source/BaseDataReader.java       |   16 +-
 .../iceberg/spark/source/BatchDataReader.java      |   70 +-
 .../spark/source/EqualityDeleteRowReader.java      |    4 +-
 .../iceberg/spark/source/HasIcebergCatalog.java    |    1 -
 .../apache/iceberg/spark/source/IcebergSource.java |   69 +-
 .../iceberg/spark/source/InternalRowWrapper.java   |   13 +-
 .../apache/iceberg/spark/source/RowDataReader.java |   72 +-
 .../iceberg/spark/source/RowDataRewriter.java      |   81 +-
 .../spark/source/SerializableTableWithSize.java    |    7 +-
 .../iceberg/spark/source/SparkAppenderFactory.java |   67 +-
 .../apache/iceberg/spark/source/SparkBatch.java    |   37 +-
 .../iceberg/spark/source/SparkBatchQueryScan.java  |   81 +-
 .../spark/source/SparkCopyOnWriteOperation.java    |   32 +-
 .../iceberg/spark/source/SparkCopyOnWriteScan.java |   61 +-
 .../spark/source/SparkFileWriterFactory.java       |   82 +-
 .../iceberg/spark/source/SparkFilesScan.java       |   29 +-
 .../spark/source/SparkFilesScanBuilder.java        |    1 -
 .../iceberg/spark/source/SparkMetadataColumn.java  |    1 -
 .../spark/source/SparkMicroBatchStream.java        |   94 +-
 .../spark/source/SparkPartitionedFanoutWriter.java |   12 +-
 .../spark/source/SparkPartitionedWriter.java       |   14 +-
 .../spark/source/SparkPositionDeltaOperation.java  |   40 +-
 .../spark/source/SparkPositionDeltaWrite.java      |  199 +-
 .../source/SparkPositionDeltaWriteBuilder.java     |   49 +-
 .../source/SparkRowLevelOperationBuilder.java      |   21 +-
 .../org/apache/iceberg/spark/source/SparkScan.java |   43 +-
 .../iceberg/spark/source/SparkScanBuilder.java     |  128 +-
 .../apache/iceberg/spark/source/SparkTable.java    |  128 +-
 .../apache/iceberg/spark/source/SparkWrite.java    |  169 +-
 .../iceberg/spark/source/SparkWriteBuilder.java    |   54 +-
 .../iceberg/spark/source/StagedSparkTable.java     |    1 -
 .../org/apache/iceberg/spark/source/Stats.java     |    1 -
 .../iceberg/spark/source/StreamingOffset.java      |   29 +-
 .../iceberg/spark/source/StructInternalRow.java    |   63 +-
 .../analysis/NoSuchProcedureException.java         |   11 +-
 .../sql/connector/iceberg/catalog/Procedure.java   |   30 +-
 .../iceberg/catalog/ProcedureCatalog.java          |    7 +-
 .../iceberg/catalog/ProcedureParameter.java        |   17 +-
 .../iceberg/catalog/ProcedureParameterImpl.java    |   14 +-
 .../connector/iceberg/write/DeltaBatchWrite.java   |    5 +-
 .../sql/connector/iceberg/write/DeltaWrite.java    |    9 +-
 .../connector/iceberg/write/DeltaWriteBuilder.java |    9 +-
 .../sql/connector/iceberg/write/DeltaWriter.java   |    5 +-
 .../iceberg/write/DeltaWriterFactory.java          |    5 +-
 .../iceberg/write/ExtendedLogicalWriteInfo.java    |   13 +-
 .../sql/connector/iceberg/write/SupportsDelta.java |    9 +-
 .../test/java/org/apache/iceberg/KryoHelpers.java  |    7 +-
 .../java/org/apache/iceberg/TaskCheckHelper.java   |   86 +-
 .../apache/iceberg/TestDataFileSerialization.java  |   72 +-
 .../apache/iceberg/TestFileIOSerialization.java    |   31 +-
 .../TestHadoopMetricsContextSerialization.java     |    4 +-
 .../iceberg/TestManifestFileSerialization.java     |  151 +-
 .../apache/iceberg/TestScanTaskSerialization.java  |   61 +-
 .../org/apache/iceberg/TestTableSerialization.java |   47 +-
 .../apache/iceberg/spark/SparkCatalogConfig.java   |   31 +-
 .../apache/iceberg/spark/SparkCatalogTestBase.java |   37 +-
 .../org/apache/iceberg/spark/SparkTestBase.java    |  109 +-
 .../iceberg/spark/SparkTestBaseWithCatalog.java    |   20 +-
 .../iceberg/spark/TestFileRewriteCoordinator.java  |  117 +-
 .../org/apache/iceberg/spark/TestSpark3Util.java   |  103 +-
 .../iceberg/spark/TestSparkCachedTableCatalog.java |   18 +-
 .../iceberg/spark/TestSparkCatalogOperations.java  |    1 -
 .../TestSparkDistributionAndOrderingUtil.java      | 1812 +++++++-------
 .../org/apache/iceberg/spark/TestSparkFilters.java |  133 +-
 .../apache/iceberg/spark/TestSparkSchemaUtil.java  |   44 +-
 .../iceberg/spark/TestSparkSessionCatalog.java     |   45 +-
 .../apache/iceberg/spark/TestSparkTableUtil.java   |   45 +-
 .../iceberg/spark/TestSparkValueConverter.java     |   62 +-
 .../iceberg/spark/actions/TestCreateActions.java   |  490 ++--
 .../actions/TestDeleteReachableFilesAction.java    |  349 ++-
 .../spark/actions/TestExpireSnapshotsAction.java   | 1107 +++++----
 .../spark/actions/TestRemoveOrphanFilesAction.java |  604 ++---
 .../actions/TestRemoveOrphanFilesAction3.java      |   66 +-
 .../spark/actions/TestRewriteDataFilesAction.java  |  731 +++---
 .../spark/actions/TestRewriteManifestsAction.java  |  274 +--
 .../apache/iceberg/spark/data/AvroDataTest.java    |  279 ++-
 .../apache/iceberg/spark/data/GenericsHelpers.java |  135 +-
 .../org/apache/iceberg/spark/data/RandomData.java  |  103 +-
 .../org/apache/iceberg/spark/data/TestHelpers.java |  297 ++-
 .../apache/iceberg/spark/data/TestOrcWrite.java    |   24 +-
 .../iceberg/spark/data/TestParquetAvroReader.java  |  156 +-
 .../iceberg/spark/data/TestParquetAvroWriter.java  |   98 +-
 .../iceberg/spark/data/TestSparkAvroEnums.java     |   35 +-
 .../iceberg/spark/data/TestSparkAvroReader.java    |   20 +-
 .../iceberg/spark/data/TestSparkDateTimes.java     |    5 +-
 .../data/TestSparkOrcReadMetadataColumns.java      |   91 +-
 .../iceberg/spark/data/TestSparkOrcReader.java     |   55 +-
 .../data/TestSparkParquetReadMetadataColumns.java  |  121 +-
 .../iceberg/spark/data/TestSparkParquetReader.java |   41 +-
 .../iceberg/spark/data/TestSparkParquetWriter.java |   99 +-
 .../spark/data/TestSparkRecordOrcReaderWriter.java |   67 +-
 ...estParquetDictionaryEncodedVectorizedReads.java |   57 +-
 ...naryFallbackToPlainEncodingVectorizedReads.java |   23 +-
 .../vectorized/TestParquetVectorizedReads.java     |  248 +-
 .../spark/source/FilePathLastModifiedRecord.java   |   20 +-
 .../apache/iceberg/spark/source/LogMessage.java    |    1 -
 .../apache/iceberg/spark/source/ManualSource.java  |    7 +-
 .../apache/iceberg/spark/source/SimpleRecord.java  |    4 +-
 .../apache/iceberg/spark/source/TestAvroScan.java  |   32 +-
 .../spark/source/TestDataFrameWriterV2.java        |  126 +-
 .../iceberg/spark/source/TestDataFrameWrites.java  |  233 +-
 .../spark/source/TestDataSourceOptions.java        |  299 +--
 .../iceberg/spark/source/TestFilteredScan.java     |  388 +--
 .../spark/source/TestForwardCompatibility.java     |  101 +-
 .../iceberg/spark/source/TestIcebergSource.java    |    2 -
 .../source/TestIcebergSourceHadoopTables.java      |    1 -
 .../spark/source/TestIcebergSourceHiveTables.java  |    4 +-
 .../spark/source/TestIcebergSourceTablesBase.java  | 1503 +++++++-----
 .../iceberg/spark/source/TestIcebergSpark.java     |  103 +-
 .../spark/source/TestIdentityPartitionData.java    |  174 +-
 .../spark/source/TestInternalRowWrapper.java       |    7 +-
 .../TestMetadataTablesWithPartitionEvolution.java  |  425 ++--
 .../iceberg/spark/source/TestParquetScan.java      |   49 +-
 .../iceberg/spark/source/TestPartitionPruning.java |  319 ++-
 .../iceberg/spark/source/TestPartitionValues.java  |  312 +--
 .../iceberg/spark/source/TestPathIdentifier.java   |   25 +-
 .../iceberg/spark/source/TestReadProjection.java   |  439 ++--
 .../TestRequiredDistributionAndOrdering.java       |  312 +--
 .../iceberg/spark/source/TestRuntimeFiltering.java |  359 +--
 .../spark/source/TestSnapshotSelection.java        |  138 +-
 .../spark/source/TestSparkAppenderFactory.java     |    9 +-
 .../spark/source/TestSparkBaseDataReader.java      |   88 +-
 .../iceberg/spark/source/TestSparkCatalog.java     |    7 +-
 .../source/TestSparkCatalogCacheExpiration.java    |  102 +-
 .../source/TestSparkCatalogHadoopOverrides.java    |   71 +-
 .../iceberg/spark/source/TestSparkDataFile.java    |  108 +-
 .../iceberg/spark/source/TestSparkDataWrite.java   |  319 ++-
 .../spark/source/TestSparkFileWriterFactory.java   |    9 +-
 .../iceberg/spark/source/TestSparkFilesScan.java   |   47 +-
 .../spark/source/TestSparkMergingMetrics.java      |   39 +-
 .../spark/source/TestSparkMetadataColumns.java     |  184 +-
 .../spark/source/TestSparkPartitioningWriters.java |    9 +-
 .../source/TestSparkPositionDeltaWriters.java      |    9 +-
 .../spark/source/TestSparkReadProjection.java      |  224 +-
 .../spark/source/TestSparkReaderDeletes.java       |  405 +--
 .../source/TestSparkReaderWithBloomFilter.java     |  247 +-
 .../spark/source/TestSparkRollingFileWriters.java  |    9 +-
 .../iceberg/spark/source/TestSparkTable.java       |    1 -
 .../spark/source/TestSparkWriterMetrics.java       |    1 -
 .../iceberg/spark/source/TestStreamingOffset.java  |   20 +-
 .../spark/source/TestStructuredStreaming.java      |  148 +-
 .../spark/source/TestStructuredStreamingRead3.java |  277 +--
 .../apache/iceberg/spark/source/TestTables.java    |   11 +-
 .../spark/source/TestTimestampWithoutZone.java     |  169 +-
 .../spark/source/TestWriteMetricsConfig.java       |   84 +-
 .../iceberg/spark/source/ThreeColumnRecord.java    |   16 +-
 .../apache/iceberg/spark/sql/TestAlterTable.java   |  267 +-
 .../apache/iceberg/spark/sql/TestCreateTable.java  |  218 +-
 .../iceberg/spark/sql/TestCreateTableAsSelect.java |  401 +--
 .../apache/iceberg/spark/sql/TestDeleteFrom.java   |   71 +-
 .../apache/iceberg/spark/sql/TestDropTable.java    |   55 +-
 .../apache/iceberg/spark/sql/TestNamespaceSQL.java |   72 +-
 .../iceberg/spark/sql/TestPartitionedWrites.java   |  101 +-
 .../spark/sql/TestPartitionedWritesAsSelect.java   |   99 +-
 .../apache/iceberg/spark/sql/TestRefreshTable.java |    8 +-
 .../org/apache/iceberg/spark/sql/TestSelect.java   |  169 +-
 .../spark/sql/TestTimestampWithoutZone.java        |  168 +-
 .../iceberg/spark/sql/TestUnpartitionedWrites.java |   99 +-
 3057 files changed, 157884 insertions(+), 123067 deletions(-)

diff --git a/.baseline/checkstyle/checkstyle.xml b/.baseline/checkstyle/checkstyle.xml
index 9b22c83c24..1ab5816448 100644
--- a/.baseline/checkstyle/checkstyle.xml
+++ b/.baseline/checkstyle/checkstyle.xml
@@ -8,10 +8,6 @@
     <property name="severity" value="error"/>
 
     <module name="FileTabCharacter"/> <!-- Java Style Guide: Whitespace characters -->
-    <module name="LineLength"> <!-- Java Style Guide: No line-wrapping -->
-        <property name="max" value="120"/>
-        <property name="ignorePattern" value="^package.*|^import.*|a href|href|http://|https://|ftp://"/>
-    </module>
     <module name="NewlineAtEndOfFile"> <!-- Java Style Guide: Line ending: LF -->
         <property name="lineSeparator" value="lf"/>
     </module>
@@ -148,10 +144,6 @@
             <property name="exceptionVariableName" value="expected"/>
         </module>
         <module name="EmptyForInitializerPad"/> <!-- Java Style Guide: Horizontal whitespace -->
-        <module name="EmptyLineSeparator"> <!-- Java Style Guide: Source file structure -->
-            <property name="tokens" value="PACKAGE_DEF, IMPORT, CLASS_DEF, ENUM_DEF, INTERFACE_DEF, CTOR_DEF, STATIC_INIT, INSTANCE_INIT, VARIABLE_DEF"/>
-            <property name="allowNoEmptyLineBetweenFields" value="true"/>
-        </module>
         <module name="EmptyStatement"/> <!-- Java Style Guide: One statement per line -->
         <module name="EqualsHashCode"/>
         <module name="FallThrough"/> <!-- Java Style Guide: Fall-through: commented -->
@@ -255,20 +247,7 @@
             <property name="illegalClassNames" value="com.google.common.cache.CacheBuilder, com.google.common.cache.Cache, com.google.common.cache.LoadingCache"/>
             <message key="illegal.type" value="Do not use Guava caches, they are outperformed by and harder to use than Caffeine caches"/>
         </module>
-        <module name="ImportOrder"> <!-- Java Style Guide: Ordering and spacing -->
-            <property name="groups" value="/.*/"/>
-            <property name="option" value="bottom"/>
-            <property name="separated" value="true"/>
-            <property name="sortStaticImportsAlphabetically" value="true"/>
-        </module>
-        <module name="Indentation"> <!-- Java Style Guide: Block indentation: +4 spaces -->
-            <property name="basicOffset" value="2"/>
-            <property name="arrayInitIndent" value="4"/>
-            <property name="lineWrappingIndentation" value="4"/>
-            <property name="caseIndent" value="2"/>
-        </module>
         <module name="InnerAssignment"/> <!-- Java Coding Guidelines: Inner assignments: Not used -->
-        <module name="LeftCurly"/> <!-- Java Style Guide: Nonempty blocks: K & R style -->
         <module name="MemberName"> <!-- Java Style Guide: Non-constant field names -->
             <property name="format" value="^[a-z][a-zA-Z0-9]+$"/>
             <message key="name.invalidPattern" value="Member name ''{0}'' must match pattern ''{1}''."/>
@@ -287,19 +266,8 @@
         <module name="NoClone"/> <!-- Java Coding Guidelines: Never override Object#finalize or Object#clone -->
         <module name="NoFinalizer"/> <!-- Java Coding Guidelines: Never override Object#finalize -->
         <module name="NoLineWrap"/> <!-- Java Style Guide: No line-wrapping -->
-        <module name="NoWhitespaceAfter"> <!-- Java Style Guide: Horizontal whitespace -->
-            <property name="allowLineBreaks" value="false"/>
-            <property name="tokens" value="BNOT,DEC,DOT,INC,LNOT,UNARY_MINUS,UNARY_PLUS"/>
-        </module>
-        <module name="NoWhitespaceBefore"> <!-- Java Style Guide: Horizontal whitespace -->
-            <property name="allowLineBreaks" value="true"/>
-        </module>
         <module name="OneStatementPerLine"/> <!-- Java Style Guide: One statement per line -->
         <module name="OneTopLevelClass"/> <!-- Java Style Guide: Exactly one top-level class declaration -->
-        <module name="OperatorWrap"> <!-- Java Style Guide: Where to break -->
-            <property name="option" value="EOL"/>
-            <property name="tokens" value="BAND, BOR, BSR, BXOR, DIV, EQUAL, GE, GT, LAND, LE, LITERAL_INSTANCEOF, LOR, LT, MINUS, MOD, NOT_EQUAL, PLUS, QUESTION, SL, SR, STAR "/>
-        </module>
         <module name="OuterTypeFilename"/> <!-- Java Style Guide: File name -->
         <module name="PackageAnnotation"/> <!-- Java Style Guide: Package statement -->
         <module name="PackageDeclaration"/> <!-- Java Style Guide: Package statement -->
@@ -410,22 +378,6 @@
             <property name="format" value="(void setUp\(\))|(void setup\(\))|(void setupStatic\(\))|(void setUpStatic\(\))|(void beforeTest\(\))|(void teardown\(\))|(void tearDown\(\))|(void beforeStatic\(\))|(void afterStatic\(\))"/>
             <property name="message" value="Test setup/teardown methods are called before(), beforeClass(), after(), afterClass(), but not setUp, teardown, etc."/>
         </module>
-        <module name="RightCurly"> <!-- Java Style Guide: Nonempty blocks: K & R style -->
-            <property name="option" value="same"/>
-            <property name="tokens" value="LITERAL_TRY, LITERAL_CATCH, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE, LITERAL_DO"/>
-        </module>
-        <module name="RightCurly"> <!-- Java Style Guide: Nonempty blocks: K & R style -->
-            <property name="option" value="alone"/>
-            <property name="tokens" value="CLASS_DEF, METHOD_DEF, CTOR_DEF, LITERAL_FOR, LITERAL_WHILE, STATIC_INIT, INSTANCE_INIT"/>
-        </module>
-        <module name="SeparatorWrap"> <!-- Java Style Guide: Where to break -->
-            <property name="tokens" value="DOT"/>
-            <property name="option" value="nl"/>
-        </module>
-        <module name="SeparatorWrap"> <!-- Java Style Guide: Where to break -->
-            <property name="tokens" value="COMMA"/>
-            <property name="option" value="EOL"/>
-        </module>
         <module name="SimplifyBooleanExpression"/> <!-- Java Coding Guidelines: Keep Boolean expressions simple -->
         <module name="SimplifyBooleanReturn"/> <!-- Java Coding Guidelines: Keep Boolean expressions simple -->
         <module name="StaticVariableName"/> <!-- Java Style Guide: Naming -->
@@ -448,16 +400,6 @@
         </module>
         <module name="UpperEll"/> <!-- Java Style Guide: Numeric Literals -->
         <module name="VisibilityModifier"/> <!-- Java Coding Guidelines: Minimize mutability -->
-        <module name="WhitespaceAfter"/> <!-- Java Style Guide: Horizontal whitespace -->
-        <module name="WhitespaceAround"> <!-- Java Style Guide: Horizontal whitespace -->
-            <property name="allowEmptyConstructors" value="true"/>
-            <property name="allowEmptyMethods" value="true"/>
-            <property name="allowEmptyTypes" value="true"/>
-            <property name="allowEmptyLoops" value="true"/>
-            <property name="ignoreEnhancedForColon" value="false"/>
-            <message key="ws.notFollowed" value="WhitespaceAround: ''{0}'' is not followed by whitespace. Empty blocks may only be represented as '{}' when not part of a multi-block statement (4.1.3)"/>
-            <message key="ws.notPreceded" value="WhitespaceAround: ''{0}'' is not preceded with whitespace."/>
-        </module>
 
         <!-- Stricter checks begin: delete some or all of the following for faster prototyping, but please restore before pushing to production. -->
 
@@ -505,7 +447,6 @@
             <message key="name.invalidPattern" value="Parameter name ''{0}'' must match pattern ''{1}''."/>
             <property name="ignoreOverridden" value="true"/>
         </module>
-        <module name="SingleLineJavadoc"/> <!-- Java Style Guide: General form -->
 
         <!-- Stricter checks end -->
     </module>
diff --git a/api/src/main/java/org/apache/iceberg/SortDirection.java b/.baseline/copyright/copyright-header-java.txt
similarity index 91%
copy from api/src/main/java/org/apache/iceberg/SortDirection.java
copy to .baseline/copyright/copyright-header-java.txt
index 3be60b6520..042f3ce1f3 100644
--- a/api/src/main/java/org/apache/iceberg/SortDirection.java
+++ b/.baseline/copyright/copyright-header-java.txt
@@ -16,9 +16,3 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-package org.apache.iceberg;
-
-public enum SortDirection {
-  ASC, DESC
-}
diff --git a/README.md b/README.md
index e5b7cf6e3f..1b3efee2bb 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,7 @@ Iceberg is built using Gradle with Java 1.8 or Java 11.
 
 * To invoke a build and run tests: `./gradlew build`
 * To skip tests: `./gradlew build -x test -x integrationTest`
+* To fix code style: `./gradlew spotlessApply`
 
 Iceberg table support is organized in library modules:
 
@@ -69,7 +70,7 @@ Iceberg table support is organized in library modules:
 * `iceberg-hive-metastore` is an implementation of Iceberg tables backed by the Hive metastore Thrift client
 * `iceberg-data` is an optional module for working with tables directly from JVM applications
 
-This project Iceberg also has modules for adding Iceberg support to processing engines:
+Iceberg also has modules for adding Iceberg support to processing engines:
 
 * `iceberg-spark2` is an implementation of Spark's Datasource V2 API in 2.4 for Iceberg (use iceberg-spark-runtime for a shaded version)
 * `iceberg-spark3` is an implementation of Spark's Datasource V2 API in 3.0 for Iceberg (use iceberg-spark3-runtime for a shaded version)
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactories.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactories.java
index c512d718da..5807f9bfe1 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactories.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactories.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun;
 
 import com.aliyun.oss.OSS;
@@ -28,35 +27,41 @@ import org.apache.iceberg.util.PropertyUtil;
 
 public class AliyunClientFactories {
 
-  private static final AliyunClientFactory ALIYUN_CLIENT_FACTORY_DEFAULT = new DefaultAliyunClientFactory();
+  private static final AliyunClientFactory ALIYUN_CLIENT_FACTORY_DEFAULT =
+      new DefaultAliyunClientFactory();
 
-  private AliyunClientFactories() {
-  }
+  private AliyunClientFactories() {}
 
   public static AliyunClientFactory defaultFactory() {
     return ALIYUN_CLIENT_FACTORY_DEFAULT;
   }
 
   public static AliyunClientFactory from(Map<String, String> properties) {
-    String factoryImpl = PropertyUtil.propertyAsString(
-        properties, AliyunProperties.CLIENT_FACTORY, DefaultAliyunClientFactory.class.getName());
+    String factoryImpl =
+        PropertyUtil.propertyAsString(
+            properties,
+            AliyunProperties.CLIENT_FACTORY,
+            DefaultAliyunClientFactory.class.getName());
     return loadClientFactory(factoryImpl, properties);
   }
 
   /**
    * Load an implemented {@link AliyunClientFactory} based on the class name, and initialize it.
    *
-   * @param impl       the class name.
+   * @param impl the class name.
    * @param properties to initialize the factory.
    * @return an initialized {@link AliyunClientFactory}.
    */
-  private static AliyunClientFactory loadClientFactory(String impl, Map<String, String> properties) {
+  private static AliyunClientFactory loadClientFactory(
+      String impl, Map<String, String> properties) {
     DynConstructors.Ctor<AliyunClientFactory> ctor;
     try {
       ctor = DynConstructors.builder(AliyunClientFactory.class).hiddenImpl(impl).buildChecked();
     } catch (NoSuchMethodException e) {
-      throw new IllegalArgumentException(String.format(
-          "Cannot initialize AliyunClientFactory, missing no-arg constructor: %s", impl), e);
+      throw new IllegalArgumentException(
+          String.format(
+              "Cannot initialize AliyunClientFactory, missing no-arg constructor: %s", impl),
+          e);
     }
 
     AliyunClientFactory factory;
@@ -64,7 +69,10 @@ public class AliyunClientFactories {
       factory = ctor.newInstance();
     } catch (ClassCastException e) {
       throw new IllegalArgumentException(
-          String.format("Cannot initialize AliyunClientFactory, %s does not implement AliyunClientFactory.", impl), e);
+          String.format(
+              "Cannot initialize AliyunClientFactory, %s does not implement AliyunClientFactory.",
+              impl),
+          e);
     }
 
     factory.initialize(properties);
@@ -74,16 +82,19 @@ public class AliyunClientFactories {
   static class DefaultAliyunClientFactory implements AliyunClientFactory {
     private AliyunProperties aliyunProperties;
 
-    DefaultAliyunClientFactory() {
-    }
+    DefaultAliyunClientFactory() {}
 
     @Override
     public OSS newOSSClient() {
       Preconditions.checkNotNull(
-          aliyunProperties, "Cannot create aliyun oss client before initializing the AliyunClientFactory.");
-
-      return new OSSClientBuilder().build(
-          aliyunProperties.ossEndpoint(), aliyunProperties.accessKeyId(), aliyunProperties.accessKeySecret());
+          aliyunProperties,
+          "Cannot create aliyun oss client before initializing the AliyunClientFactory.");
+
+      return new OSSClientBuilder()
+          .build(
+              aliyunProperties.ossEndpoint(),
+              aliyunProperties.accessKeyId(),
+              aliyunProperties.accessKeySecret());
     }
 
     @Override
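
For reference, a minimal sketch of driving the factory loader above; the endpoint and credential values are placeholders and not part of this commit:

    import com.aliyun.oss.OSS;
    import java.util.Map;
    import org.apache.iceberg.aliyun.AliyunClientFactories;
    import org.apache.iceberg.aliyun.AliyunClientFactory;
    import org.apache.iceberg.aliyun.AliyunProperties;
    import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;

    public class FactoryExample {
      public static void main(String[] args) {
        // Catalog-style properties; all values here are placeholders.
        Map<String, String> props = ImmutableMap.of(
            AliyunProperties.OSS_ENDPOINT, "oss-cn-hangzhou.aliyuncs.com",
            AliyunProperties.CLIENT_ACCESS_KEY_ID, "my-access-key-id",
            AliyunProperties.CLIENT_ACCESS_KEY_SECRET, "my-access-key-secret");

        // client.factory-impl is unset, so from() falls back to the default
        // factory and initializes it with the properties before returning it.
        AliyunClientFactory factory = AliyunClientFactories.from(props);
        OSS client = factory.newOSSClient();
      }
    }
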
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactory.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactory.java
index 6657a2234f..67490db578 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactory.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunClientFactory.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun;
 
 import com.aliyun.oss.OSS;
@@ -39,8 +38,6 @@ public interface AliyunClientFactory extends Serializable {
    */
   void initialize(Map<String, String> properties);
 
-  /**
-   * Returns an initialized {@link AliyunProperties}
-   */
+  /** Returns an initialized {@link AliyunProperties} */
   AliyunProperties aliyunProperties();
 }
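
A hypothetical custom implementation of the interface, selected via the client.factory-impl property; this sketch assumes AliyunProperties exposes a Map-based constructor, as the default factory's wiring above suggests:

    import com.aliyun.oss.OSS;
    import com.aliyun.oss.OSSClientBuilder;
    import java.util.Map;
    import org.apache.iceberg.aliyun.AliyunClientFactory;
    import org.apache.iceberg.aliyun.AliyunProperties;

    public class MyAliyunClientFactory implements AliyunClientFactory {
      private AliyunProperties aliyunProperties;

      @Override
      public void initialize(Map<String, String> properties) {
        // Assumption: AliyunProperties(Map) parses the client.* and oss.* keys.
        this.aliyunProperties = new AliyunProperties(properties);
      }

      @Override
      public OSS newOSSClient() {
        // Same client wiring as DefaultAliyunClientFactory above.
        return new OSSClientBuilder()
            .build(
                aliyunProperties.ossEndpoint(),
                aliyunProperties.accessKeyId(),
                aliyunProperties.accessKeySecret());
      }

      @Override
      public AliyunProperties aliyunProperties() {
        return aliyunProperties;
      }
    }
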
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunProperties.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunProperties.java
index 7474e6a3b5..623b55263a 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunProperties.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/AliyunProperties.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun;
 
 import java.io.Serializable;
@@ -26,41 +25,43 @@ import org.apache.iceberg.util.PropertyUtil;
 
 public class AliyunProperties implements Serializable {
   /**
-   * The domain name used to access OSS. OSS uses HTTP Restful APIs to provide services. Different regions are accessed
-   * by using different endpoints. For the same region, access over the internal network or over the Internet also uses
-   * different endpoints. For more information, see:
+   * The domain name used to access OSS. OSS uses HTTP RESTful APIs to provide services. Different
+   * regions are accessed by using different endpoints. For the same region, access over the
+   * internal network or over the Internet also uses different endpoints. For more information, see:
    * https://www.alibabacloud.com/help/doc-detail/31837.htm
    */
   public static final String OSS_ENDPOINT = "oss.endpoint";
 
   /**
-   * Aliyun uses an AccessKey pair, which includes an AccessKey ID and an AccessKey secret to implement symmetric
-   * encryption and verify the identity of a requester. The AccessKey ID is used to identify a user.
-   * <p>
-   * For more information about how to obtain an AccessKey pair, see:
+   * Aliyun uses an AccessKey pair, which includes an AccessKey ID and an AccessKey secret to
+   * implement symmetric encryption and verify the identity of a requester. The AccessKey ID is used
+   * to identify a user.
+   *
+   * <p>For more information about how to obtain an AccessKey pair, see:
    * https://www.alibabacloud.com/help/doc-detail/53045.htm
    */
   public static final String CLIENT_ACCESS_KEY_ID = "client.access-key-id";
 
   /**
-   * Aliyun uses an AccessKey pair, which includes an AccessKey ID and an AccessKey secret to implement symmetric
-   * encryption and verify the identity of a requester. The AccessKey secret is used to encrypt and verify the
-   * signature string.
-   * <p>
-   * For more information about how to obtain an AccessKey pair, see:
+   * Aliyun uses an AccessKey pair, which includes an AccessKey ID and an AccessKey secret to
+   * implement symmetric encryption and verify the identity of a requester. The AccessKey secret is
+   * used to encrypt and verify the signature string.
+   *
+   * <p>For more information about how to obtain an AccessKey pair, see:
    * https://www.alibabacloud.com/help/doc-detail/53045.htm
    */
   public static final String CLIENT_ACCESS_KEY_SECRET = "client.access-key-secret";
 
   /**
-   * The implementation class of {@link AliyunClientFactory} to customize Aliyun client configurations.
-   * If set, all Aliyun clients will be initialized by the specified factory.
-   * If not set, {@link AliyunClientFactories#defaultFactory()} is used as default factory.
+   * The implementation class of {@link AliyunClientFactory} to customize Aliyun client
+   * configurations. If set, all Aliyun clients will be initialized by the specified factory. If not
+   * set, {@link AliyunClientFactories#defaultFactory()} is used as the default factory.
    */
   public static final String CLIENT_FACTORY = "client.factory-impl";
 
   /**
-   * Location to put staging files for uploading to OSS, defaults to the directory value of java.io.tmpdir.
+   * Location to put staging files for uploading to OSS; defaults to the value of the
+   * java.io.tmpdir system property.
    */
   public static final String OSS_STAGING_DIRECTORY = "oss.staging-dir";
 
@@ -79,8 +80,9 @@ public class AliyunProperties implements Serializable {
     this.accessKeyId = properties.get(CLIENT_ACCESS_KEY_ID);
     this.accessKeySecret = properties.get(CLIENT_ACCESS_KEY_SECRET);
 
-    this.ossStagingDirectory = PropertyUtil.propertyAsString(properties, OSS_STAGING_DIRECTORY,
-        System.getProperty("java.io.tmpdir"));
+    this.ossStagingDirectory =
+        PropertyUtil.propertyAsString(
+            properties, OSS_STAGING_DIRECTORY, System.getProperty("java.io.tmpdir"));
   }
 
   public String ossEndpoint() {
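
For quick reference, the documented keys as literal strings in a sketch of a property map; the values are placeholders, and the staging entry is optional, falling back to java.io.tmpdir as noted above:

    import java.util.Map;
    import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;

    Map<String, String> properties = ImmutableMap.of(
        "oss.endpoint", "oss-cn-hangzhou.aliyuncs.com",      // OSS_ENDPOINT (placeholder)
        "client.access-key-id", "my-access-key-id",          // CLIENT_ACCESS_KEY_ID
        "client.access-key-secret", "my-access-key-secret",  // CLIENT_ACCESS_KEY_SECRET
        "oss.staging-dir", "/tmp/iceberg-oss-staging");      // OSS_STAGING_DIRECTORY (optional)
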
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java
index 48aca1523a..d957e82f92 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -62,8 +61,8 @@ abstract class BaseOSSFile {
       return objectMetadata() != null;
     } catch (OSSException e) {
 
-      if (e.getErrorCode().equals(OSSErrorCode.NO_SUCH_BUCKET) ||
-          e.getErrorCode().equals(OSSErrorCode.NO_SUCH_KEY)) {
+      if (e.getErrorCode().equals(OSSErrorCode.NO_SUCH_BUCKET)
+          || e.getErrorCode().equals(OSSErrorCode.NO_SUCH_KEY)) {
         return false;
       }
 
@@ -85,8 +84,6 @@ abstract class BaseOSSFile {
 
   @Override
   public String toString() {
-    return MoreObjects.toStringHelper(this)
-        .add("file", uri)
-        .toString();
+    return MoreObjects.toStringHelper(this).add("file", uri).toString();
   }
 }
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSFileIO.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSFileIO.java
index 61570ef94c..be85b93a75 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSFileIO.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSFileIO.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -36,14 +35,15 @@ import org.slf4j.LoggerFactory;
 
 /**
  * FileIO implementation backed by OSS.
- * <p>
- * Locations used must follow the conventions for OSS URIs (e.g. oss://bucket/path...).
- * URIs with scheme https are also treated as oss file paths.
- * Using this FileIO with other schemes with result in {@link org.apache.iceberg.exceptions.ValidationException}
+ *
+ * <p>Locations used must follow the conventions for OSS URIs (e.g. oss://bucket/path...). URIs with
+ * scheme https are also treated as oss file paths. Using this FileIO with other schemes will result
+ * in {@link org.apache.iceberg.exceptions.ValidationException}
  */
 public class OSSFileIO implements FileIO {
   private static final Logger LOG = LoggerFactory.getLogger(OSSFileIO.class);
-  private static final String DEFAULT_METRICS_IMPL = "org.apache.iceberg.hadoop.HadoopMetricsContext";
+  private static final String DEFAULT_METRICS_IMPL =
+      "org.apache.iceberg.hadoop.HadoopMetricsContext";
 
   private SerializableSupplier<OSS> oss;
   private AliyunProperties aliyunProperties;
@@ -53,16 +53,16 @@ public class OSSFileIO implements FileIO {
 
   /**
    * No-arg constructor to load the FileIO dynamically.
-   * <p>
-   * All fields are initialized by calling {@link OSSFileIO#initialize(Map)} later.
+   *
+   * <p>All fields are initialized by calling {@link OSSFileIO#initialize(Map)} later.
    */
-  public OSSFileIO() {
-  }
+  public OSSFileIO() {}
 
   /**
    * Constructor with custom oss supplier and default aliyun properties.
-   * <p>
-   * Calling {@link OSSFileIO#initialize(Map)} will overwrite information set in this constructor.
+   *
+   * <p>Calling {@link OSSFileIO#initialize(Map)} will overwrite information set in this
+   * constructor.
    *
    * @param oss oss supplier
    */
@@ -107,12 +107,17 @@ public class OSSFileIO implements FileIO {
     // Report Hadoop metrics if Hadoop is available
     try {
       DynConstructors.Ctor<MetricsContext> ctor =
-          DynConstructors.builder(MetricsContext.class).hiddenImpl(DEFAULT_METRICS_IMPL, String.class).buildChecked();
+          DynConstructors.builder(MetricsContext.class)
+              .hiddenImpl(DEFAULT_METRICS_IMPL, String.class)
+              .buildChecked();
       MetricsContext context = ctor.newInstance("oss");
       context.initialize(properties);
       this.metrics = context;
     } catch (NoClassDefFoundError | NoSuchMethodException | ClassCastException e) {
-      LOG.warn("Unable to load metrics class: '{}', falling back to null metrics", DEFAULT_METRICS_IMPL, e);
+      LOG.warn(
+          "Unable to load metrics class: '{}', falling back to null metrics",
+          DEFAULT_METRICS_IMPL,
+          e);
     }
   }
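
A minimal sketch of this FileIO in use; newInputFile comes from the FileIO interface, and both the property values and the location below are placeholders:

    import java.util.Map;
    import org.apache.iceberg.aliyun.oss.OSSFileIO;
    import org.apache.iceberg.io.InputFile;
    import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;

    Map<String, String> properties = ImmutableMap.of(
        "oss.endpoint", "oss-cn-hangzhou.aliyuncs.com",   // placeholder
        "client.access-key-id", "my-access-key-id",       // placeholder
        "client.access-key-secret", "my-access-key-secret");

    OSSFileIO fileIO = new OSSFileIO();  // no-arg constructor; fields set by initialize()
    fileIO.initialize(properties);
    InputFile input = fileIO.newInputFile("oss://my-bucket/path/to/data.parquet");
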
 
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputFile.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputFile.java
index 4c52427212..40ab3a021e 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputFile.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputFile.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -26,9 +25,7 @@ import org.apache.iceberg.io.InputFile;
 import org.apache.iceberg.io.SeekableInputStream;
 import org.apache.iceberg.metrics.MetricsContext;
 
-/**
- * @deprecated moving to package-private in 0.15.0; use OSSFileIO to create InputFile instances
- */
+/** @deprecated moving to package-private in 0.15.0; use OSSFileIO to create InputFile instances */
 @Deprecated
 public class OSSInputFile extends BaseOSSFile implements InputFile {
 
@@ -38,7 +35,12 @@ public class OSSInputFile extends BaseOSSFile implements InputFile {
     super(client, uri, aliyunProperties, metrics);
   }
 
-  OSSInputFile(OSS client, OSSURI uri, AliyunProperties aliyunProperties, long length, MetricsContext metrics) {
+  OSSInputFile(
+      OSS client,
+      OSSURI uri,
+      AliyunProperties aliyunProperties,
+      long length,
+      MetricsContext metrics) {
     super(client, uri, aliyunProperties, metrics);
     ValidationException.check(length >= 0, "Invalid file length: %s", length);
     this.length = length;
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputStream.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputStream.java
index b68ba97c33..58359faeb2 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputStream.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSInputStream.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -35,9 +34,7 @@ import org.apache.iceberg.relocated.com.google.common.io.ByteStreams;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * @deprecated moving to package-private in 0.15.0
- */
+/** @deprecated moving to package-private in 0.15.0 */
 @Deprecated
 public class OSSInputStream extends SeekableInputStream {
   private static final Logger LOG = LoggerFactory.getLogger(OSSInputStream.class);
@@ -55,9 +52,7 @@ public class OSSInputStream extends SeekableInputStream {
   private final Counter<Long> readBytes;
   private final Counter<Integer> readOperations;
 
-  /**
-   * @deprecated moving to package-private in 0.15.0
-   */
+  /** @deprecated moving to package-private in 0.15.0 */
   @Deprecated
   public OSSInputStream(OSS client, OSSURI uri) {
     this(client, uri, MetricsContext.nullMetrics());
@@ -69,7 +64,8 @@ public class OSSInputStream extends SeekableInputStream {
     this.createStack = Thread.currentThread().getStackTrace();
 
     this.readBytes = metrics.counter(FileIOMetricsContext.READ_BYTES, Long.class, Unit.BYTES);
-    this.readOperations = metrics.counter(FileIOMetricsContext.READ_OPERATIONS, Integer.class, Unit.COUNT);
+    this.readOperations =
+        metrics.counter(FileIOMetricsContext.READ_OPERATIONS, Integer.class, Unit.COUNT);
   }
 
   @Override
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputFile.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputFile.java
index d8865c7f78..9440478090 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputFile.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputFile.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -33,8 +32,10 @@ class OSSOutputFile extends BaseOSSFile implements OutputFile {
     super(client, uri, aliyunProperties, metrics);
   }
 
-  static OSSOutputFile fromLocation(OSS client, String location, AliyunProperties aliyunProperties) {
-    return new OSSOutputFile(client, new OSSURI(location), aliyunProperties, MetricsContext.nullMetrics());
+  static OSSOutputFile fromLocation(
+      OSS client, String location, AliyunProperties aliyunProperties) {
+    return new OSSOutputFile(
+        client, new OSSURI(location), aliyunProperties, MetricsContext.nullMetrics());
   }
 
   @Override
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputStream.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputStream.java
index dbbd513f7a..cd761434dc 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputStream.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSOutputStream.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -59,7 +58,8 @@ public class OSSOutputStream extends PositionOutputStream {
   private final Counter<Long> writeBytes;
   private final Counter<Integer> writeOperations;
 
-  OSSOutputStream(OSS client, OSSURI uri, AliyunProperties aliyunProperties, MetricsContext metrics) {
+  OSSOutputStream(
+      OSS client, OSSURI uri, AliyunProperties aliyunProperties, MetricsContext metrics) {
     this.client = client;
     this.uri = uri;
     this.createStack = Thread.currentThread().getStackTrace();
@@ -67,7 +67,8 @@ public class OSSOutputStream extends PositionOutputStream {
     this.currentStagingFile = newStagingFile(aliyunProperties.ossStagingDirectory());
     this.stream = newStream(currentStagingFile);
     this.writeBytes = metrics.counter(FileIOMetricsContext.WRITE_BYTES, Long.class, Unit.BYTES);
-    this.writeOperations = metrics.counter(FileIOMetricsContext.WRITE_OPERATIONS, Integer.class, Unit.COUNT);
+    this.writeOperations =
+        metrics.counter(FileIOMetricsContext.WRITE_OPERATIONS, Integer.class, Unit.COUNT);
   }
 
   private static File newStagingFile(String ossStagingDirectory) {
@@ -154,7 +155,8 @@ public class OSSOutputStream extends PositionOutputStream {
     ObjectMetadata metadata = new ObjectMetadata();
     metadata.setContentLength(contentLength);
 
-    PutObjectRequest request = new PutObjectRequest(uri.bucket(), uri.key(), contentStream, metadata);
+    PutObjectRequest request =
+        new PutObjectRequest(uri.bucket(), uri.key(), contentStream, metadata);
     client.putObject(request);
   }
 
diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSURI.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSURI.java
index c9dfd8acf3..74b937ac9b 100644
--- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSURI.java
+++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/OSSURI.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.internal.OSSUtils;
@@ -26,13 +25,11 @@ import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
 
 /**
- * This class represents a fully qualified location in OSS for input/output
- * operations expressed as as URI.  This implementation is provided to
- * ensure compatibility with Hadoop Path implementations that may introduce
- * encoding issues with native URI implementation.
+ * This class represents a fully qualified location in OSS for input/output operations expressed as
+ * a URI. This implementation is provided to ensure compatibility with Hadoop Path implementations
+ * that may introduce encoding issues with the native URI implementation.
  *
- * Note: Path-style access is deprecated and not supported by this
- * implementation.
+ * <p>Note: Path-style access is deprecated and not supported by this implementation.
  */
 public class OSSURI {
   private static final String SCHEME_DELIM = "://";
@@ -45,19 +42,16 @@ public class OSSURI {
   private final String key;
 
   /**
-   * Creates a new OSSURI based on the bucket and key parsed from the location
-   * The location in string form has the syntax as below, which refers to RFC2396:
-   * [scheme:][//bucket][object key][#fragment]
-   * [scheme:][//bucket][object key][?query][#fragment]
+   * Creates a new OSSURI based on the bucket and key parsed from the location. The location in
+   * string form has the syntax below, which refers to RFC 2396: [scheme:][//bucket][object
+   * key][#fragment] [scheme:][//bucket][object key][?query][#fragment]
    *
-   * It specifies precisely which characters are permitted in the various components of a URI reference
-   * in Aliyun OSS documentation as below:
-   * Bucket: https://help.aliyun.com/document_detail/257087.html
-   * Object: https://help.aliyun.com/document_detail/273129.html
-   * Scheme: https or oss
+   * <p>The characters permitted in each component of a URI reference are specified precisely in
+   * the Aliyun OSS documentation: Bucket: https://help.aliyun.com/document_detail/257087.html
+   * Object: https://help.aliyun.com/document_detail/273129.html Scheme: https or oss
    *
-   * <p>
-   * Supported access styles are https and oss://... URIs.
+   * <p>Supported access styles are https and oss://... URIs.
    *
    * @param location fully qualified URI.
    */
@@ -69,14 +63,17 @@ public class OSSURI {
     ValidationException.check(schemeSplit.length == 2, "Invalid OSS location: %s", location);
 
     String scheme = schemeSplit[0];
-    ValidationException.check(VALID_SCHEMES.contains(scheme.toLowerCase()),
-            "Invalid scheme: %s in OSS location %s", scheme, location);
+    ValidationException.check(
+        VALID_SCHEMES.contains(scheme.toLowerCase()),
+        "Invalid scheme: %s in OSS location %s",
+        scheme,
+        location);
 
     String[] authoritySplit = schemeSplit[1].split(PATH_DELIM, 2);
-    ValidationException.check(authoritySplit.length == 2,
-            "Invalid bucket or key in OSS location: %s", location);
-    ValidationException.check(!authoritySplit[1].trim().isEmpty(),
-            "Missing key in OSS location: %s", location);
+    ValidationException.check(
+        authoritySplit.length == 2, "Invalid bucket or key in OSS location: %s", location);
+    ValidationException.check(
+        !authoritySplit[1].trim().isEmpty(), "Missing key in OSS location: %s", location);
     this.bucket = authoritySplit[0];
     OSSUtils.ensureBucketNameValid(bucket);
 
@@ -88,23 +85,17 @@ public class OSSURI {
     OSSUtils.ensureObjectKeyValid(key);
   }
 
-  /**
-   * Return OSS bucket name.
-   */
+  /** Return OSS bucket name. */
   public String bucket() {
     return bucket;
   }
 
-  /**
-   * Return OSS object key name.
-   */
+  /** Return OSS object key name. */
   public String key() {
     return key;
   }
 
-  /**
-   * Return original, unmodified OSS URI location.
-   */
+  /** Return original, unmodified OSS URI location. */
   public String location() {
     return location;
   }
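
To make the parsing rules above concrete, a small sketch (the location is a placeholder):

    import org.apache.iceberg.aliyun.oss.OSSURI;

    OSSURI uri = new OSSURI("oss://my-bucket/path/to/file");
    String bucket = uri.bucket();       // "my-bucket"
    String key = uri.key();             // "path/to/file"
    String location = uri.location();   // the original, unmodified location string
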
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/TestAliyunClientFactories.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/TestAliyunClientFactories.java
index d5518a1ba8..fa071e8605 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/TestAliyunClientFactories.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/TestAliyunClientFactories.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun;
 
 import com.aliyun.oss.OSS;
@@ -30,23 +29,27 @@ public class TestAliyunClientFactories {
 
   @Test
   public void testLoadDefault() {
-    Assert.assertEquals("Default client should be singleton",
-        AliyunClientFactories.defaultFactory(), AliyunClientFactories.defaultFactory());
+    Assert.assertEquals(
+        "Default client should be singleton",
+        AliyunClientFactories.defaultFactory(),
+        AliyunClientFactories.defaultFactory());
 
     AliyunClientFactory defaultFactory = AliyunClientFactories.from(Maps.newHashMap());
     Assert.assertTrue(
         "Should load default when factory impl not configured",
-         defaultFactory instanceof AliyunClientFactories.DefaultAliyunClientFactory);
-    Assert.assertNull("Should have no Aliyun properties set",
-        defaultFactory.aliyunProperties().accessKeyId());
+        defaultFactory instanceof AliyunClientFactories.DefaultAliyunClientFactory);
+    Assert.assertNull(
+        "Should have no Aliyun properties set", defaultFactory.aliyunProperties().accessKeyId());
 
-    AliyunClientFactory defaultFactoryWithConfig = AliyunClientFactories.from(
-        ImmutableMap.of(AliyunProperties.CLIENT_ACCESS_KEY_ID, "key"));
+    AliyunClientFactory defaultFactoryWithConfig =
+        AliyunClientFactories.from(ImmutableMap.of(AliyunProperties.CLIENT_ACCESS_KEY_ID, "key"));
     Assert.assertTrue(
         "Should load default when factory impl not configured",
         defaultFactoryWithConfig instanceof AliyunClientFactories.DefaultAliyunClientFactory);
-    Assert.assertEquals("Should have access key set",
-        "key", defaultFactoryWithConfig.aliyunProperties().accessKeyId());
+    Assert.assertEquals(
+        "Should have access key set",
+        "key",
+        defaultFactoryWithConfig.aliyunProperties().accessKeyId());
   }
 
   @Test
@@ -62,8 +65,7 @@ public class TestAliyunClientFactories {
 
     AliyunProperties aliyunProperties;
 
-    public CustomFactory() {
-    }
+    public CustomFactory() {}
 
     @Override
     public OSS newOSSClient() {
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/TestUtility.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/TestUtility.java
index 762074de32..ac87a82fd7 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/TestUtility.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/TestUtility.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun;
 
 import org.apache.iceberg.aliyun.oss.AliyunOSSTestRule;
@@ -40,8 +39,7 @@ public class TestUtility {
   private static final String ALIYUN_TEST_OSS_ENDPOINT = "ALIYUN_TEST_OSS_ENDPOINT";
   private static final String ALIYUN_TEST_OSS_WAREHOUSE = "ALIYUN_TEST_OSS_WAREHOUSE";
 
-  private TestUtility() {
-  }
+  private TestUtility() {}
 
   public static AliyunOSSTestRule initialize() {
     AliyunOSSTestRule testRule;
@@ -54,11 +52,15 @@ public class TestUtility {
             DynConstructors.builder(AliyunOSSTestRule.class).impl(implClass).buildChecked();
         testRule = ctor.newInstance();
       } catch (NoSuchMethodException e) {
-        throw new IllegalArgumentException(String.format(
-            "Cannot initialize AliyunOSSTestRule, missing no-arg constructor: %s", implClass), e);
+        throw new IllegalArgumentException(
+            String.format(
+                "Cannot initialize AliyunOSSTestRule, missing no-arg constructor: %s", implClass),
+            e);
       } catch (ClassCastException e) {
-        throw new IllegalArgumentException(String.format(
-            "Cannot initialize AliyunOSSTestRule, %s does not implement it.", implClass), e);
+        throw new IllegalArgumentException(
+            String.format(
+                "Cannot initialize AliyunOSSTestRule, %s does not implement it.", implClass),
+            e);
       }
     } else {
       LOG.info("Initializing AliyunOSSTestRule implementation with default AliyunOSSMockRule");
@@ -94,8 +96,10 @@ public class TestUtility {
 
   private static OSSURI ossWarehouseURI() {
     String ossWarehouse = ossWarehouse();
-    Preconditions.checkNotNull(ossWarehouse,
-        "Please set a correct Aliyun OSS path for environment variable '%s'", ALIYUN_TEST_OSS_WAREHOUSE);
+    Preconditions.checkNotNull(
+        ossWarehouse,
+        "Please set a correct Aliyun OSS path for environment variable '%s'",
+        ALIYUN_TEST_OSS_WAREHOUSE);
 
     return new OSSURI(ossWarehouse);
   }
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestBase.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestBase.java
index 220a867832..8b42cfe9bd 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestBase.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestBase.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -27,8 +26,7 @@ import org.junit.Before;
 import org.junit.ClassRule;
 
 public abstract class AliyunOSSTestBase {
-  @ClassRule
-  public static final AliyunOSSTestRule OSS_TEST_RULE = TestUtility.initialize();
+  @ClassRule public static final AliyunOSSTestRule OSS_TEST_RULE = TestUtility.initialize();
 
   private final SerializableSupplier<OSS> ossClient = OSS_TEST_RULE::createOSSClient;
   private final String bucketName = OSS_TEST_RULE.testBucketName();
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestRule.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestRule.java
index 3e43e5df6c..b9afa952aa 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestRule.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/AliyunOSSTestRule.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -26,16 +25,15 @@ import org.junit.runner.Description;
 import org.junit.runners.model.Statement;
 
 /**
- * API for test Aliyun Object Storage Service (OSS) which is either local mock http server or remote aliyun oss server
- * <p>
- * This API includes start,stop OSS service, create OSS client, setup bucket and teardown bucket.
+ * API for testing the Aliyun Object Storage Service (OSS), backed by either a local mock HTTP
+ * server or a remote Aliyun OSS server.
+ *
+ * <p>This API includes starting and stopping the OSS service, creating an OSS client, and
+ * setting up and tearing down buckets.
  */
 public interface AliyunOSSTestRule extends TestRule {
   UUID RANDOM_UUID = java.util.UUID.randomUUID();
 
-  /**
-   * Returns a specific bucket name for testing purpose.
-   */
+  /** Returns a specific bucket name for testing purposes. */
   default String testBucketName() {
     return String.format("oss-testing-bucket-%s", RANDOM_UUID);
   }
@@ -56,9 +54,10 @@ public interface AliyunOSSTestRule extends TestRule {
   }
 
   /**
-   * Returns the common key prefix for those newly created objects in test cases. For example, we set the test bucket
-   * to be 'oss-testing-bucket' and the key prefix to be 'iceberg-objects/', then the produced objects in test cases
-   * will be:
+   * Returns the common key prefix for objects created in test cases. For example, if the test
+   * bucket is 'oss-testing-bucket' and the key prefix is 'iceberg-objects/', then the objects
+   * produced in test cases will be:
+   *
    * <pre>
    *   oss://oss-testing-bucket/iceberg-objects/a.dat
    *   oss://oss-testing-bucket/iceberg-objects/b.dat
@@ -67,28 +66,21 @@ public interface AliyunOSSTestRule extends TestRule {
    */
   String keyPrefix();
 
-  /**
-   * Start the Aliyun Object storage services application that the OSS client could connect to.
-   */
+  /** Start the Aliyun object storage service application that the OSS client can connect to. */
   void start();
 
-  /**
-   * Stop the Aliyun object storage services.
-   */
+  /** Stop the Aliyun object storage service. */
   void stop();
 
-  /**
-   * Returns an newly created {@link OSS} client.
-   */
+  /** Returns a newly created {@link OSS} client. */
   OSS createOSSClient();
 
   /**
-   * Preparation work of bucket for the test case, for example we need to check the existence of specific bucket.
+   * Preparation work for the test case's bucket, for example checking that the specific bucket
+   * exists.
    */
   void setUpBucket(String bucket);
 
-  /**
-   * Clean all the objects that created from this test suite in the bucket.
-   */
+  /** Clean up all the objects created by this test suite in the bucket. */
   void tearDownBucket(String bucket);
 }
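
A minimal usage sketch of this rule, mirroring how AliyunOSSTestBase below wires
it up; the class name here is hypothetical:

    import org.apache.iceberg.aliyun.TestUtility;
    import org.apache.iceberg.aliyun.oss.AliyunOSSTestRule;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.ClassRule;

    public abstract class MyOSSTestBase {  // hypothetical test base
      // JUnit runs the rule around the class, starting/stopping the OSS service
      @ClassRule public static final AliyunOSSTestRule OSS_TEST_RULE = TestUtility.initialize();

      @Before
      public void setupBucket() {
        OSS_TEST_RULE.setUpBucket(OSS_TEST_RULE.testBucketName());
      }

      @After
      public void cleanupBucket() {
        OSS_TEST_RULE.tearDownBucket(OSS_TEST_RULE.testBucketName());
      }
    }
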
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/OSSIntegrationTestRule.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/OSSIntegrationTestRule.java
index 691d6d02eb..21e427385a 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/OSSIntegrationTestRule.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/OSSIntegrationTestRule.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -80,7 +79,8 @@ public class OSSIntegrationTestRule implements AliyunOSSTestRule {
   public void setUpBucket(String bucket) {
     Preconditions.checkArgument(
         ossClient().doesBucketExist(bucket),
-        "Bucket %s does not exist, please create it firstly.", bucket);
+        "Bucket %s does not exist, please create it firstly.",
+        bucket);
   }
 
   @Override
@@ -89,10 +89,11 @@ public class OSSIntegrationTestRule implements AliyunOSSTestRule {
     String nextContinuationToken = null;
     ListObjectsV2Result objectListingResult;
     do {
-      ListObjectsV2Request listObjectsV2Request = new ListObjectsV2Request(bucket)
-          .withMaxKeys(maxKeys)
-          .withPrefix(ossKey)
-          .withContinuationToken(nextContinuationToken);
+      ListObjectsV2Request listObjectsV2Request =
+          new ListObjectsV2Request(bucket)
+              .withMaxKeys(maxKeys)
+              .withPrefix(ossKey)
+              .withContinuationToken(nextContinuationToken);
       objectListingResult = ossClient().listObjectsV2(listObjectsV2Request);
 
       for (OSSObjectSummary s : objectListingResult.getObjectSummaries()) {
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSFileIO.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSFileIO.java
index 9bebfae7c0..febbf3fe33 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSFileIO.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSFileIO.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -74,7 +73,8 @@ public class TestOSSFileIO extends AliyunOSSTestBase {
     writeOSSData(out, data);
 
     OSSURI uri = new OSSURI(location);
-    Assert.assertTrue("OSS file should exist", ossClient().get().doesObjectExist(uri.bucket(), uri.key()));
+    Assert.assertTrue(
+        "OSS file should exist", ossClient().get().doesObjectExist(uri.bucket(), uri.key()));
     Assert.assertEquals("Should have expected location", location, out.location());
     Assert.assertEquals("Should have expected length", dataSize, ossDataLength(uri));
     Assert.assertArrayEquals("Should have expected content", data, ossDataContent(uri, dataSize));
@@ -118,7 +118,8 @@ public class TestOSSFileIO extends AliyunOSSTestBase {
 
     byte[] data = SerializationUtil.serializeToBytes(file);
     FileIO expectedFileIO = SerializationUtil.deserializeFromBytes(data);
-    Assert.assertTrue("The deserialized FileIO should be OSSFileIO", expectedFileIO instanceof OSSFileIO);
+    Assert.assertTrue(
+        "The deserialized FileIO should be OSSFileIO", expectedFileIO instanceof OSSFileIO);
   }
 
   @Test
@@ -126,7 +127,8 @@ public class TestOSSFileIO extends AliyunOSSTestBase {
     String endpoint = "iceberg-test-oss.aliyun.com";
     String accessKeyId = UUID.randomUUID().toString();
     String accessSecret = UUID.randomUUID().toString();
-    SerializableSupplier<OSS> pre = () -> new OSSClientBuilder().build(endpoint, accessKeyId, accessSecret);
+    SerializableSupplier<OSS> pre =
+        () -> new OSSClientBuilder().build(endpoint, accessKeyId, accessSecret);
 
     byte[] data = SerializationUtil.serializeToBytes(pre);
     SerializableSupplier<OSS> post = SerializationUtil.deserializeFromBytes(data);
@@ -135,12 +137,16 @@ public class TestOSSFileIO extends AliyunOSSTestBase {
     Assert.assertTrue("Should be instance of oss client", client instanceof OSSClient);
 
     OSSClient oss = (OSSClient) client;
-    Assert.assertEquals("Should have expected endpoint",
-        new URI("http://" + endpoint), oss.getEndpoint());
-    Assert.assertEquals("Should have expected access key",
-        accessKeyId, oss.getCredentialsProvider().getCredentials().getAccessKeyId());
-    Assert.assertEquals("Should have expected secret key",
-        accessSecret, oss.getCredentialsProvider().getCredentials().getSecretAccessKey());
+    Assert.assertEquals(
+        "Should have expected endpoint", new URI("http://" + endpoint), oss.getEndpoint());
+    Assert.assertEquals(
+        "Should have expected access key",
+        accessKeyId,
+        oss.getCredentialsProvider().getCredentials().getAccessKeyId());
+    Assert.assertEquals(
+        "Should have expected secret key",
+        accessSecret,
+        oss.getCredentialsProvider().getCredentials().getSecretAccessKey());
   }
 
   private FileIO fileIO() {
@@ -158,7 +164,11 @@ public class TestOSSFileIO extends AliyunOSSTestBase {
   }
 
   private long ossDataLength(OSSURI uri) {
-    return ossClient().get().getObject(uri.bucket(), uri.key()).getObjectMetadata().getContentLength();
+    return ossClient()
+        .get()
+        .getObject(uri.bucket(), uri.key())
+        .getObjectMetadata()
+        .getContentLength();
   }
 
   private byte[] ossDataContent(OSSURI uri, int dataSize) throws IOException {
@@ -170,7 +180,8 @@ public class TestOSSFileIO extends AliyunOSSTestBase {
   }
 
   private void writeOSSData(OutputFile out, byte[] data) throws IOException {
-    try (OutputStream os = out.create(); InputStream is = new ByteArrayInputStream(data)) {
+    try (OutputStream os = out.create();
+        InputStream is = new ByteArrayInputStream(data)) {
       ByteStreams.copy(is, os);
     }
   }
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputFile.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputFile.java
index 44d370ca9f..2f67dea7c3 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputFile.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputFile.java
@@ -16,9 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
+import static org.mockito.AdditionalAnswers.delegatesTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 import com.aliyun.oss.OSS;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
@@ -35,12 +40,6 @@ import org.apache.iceberg.relocated.com.google.common.io.ByteStreams;
 import org.junit.Assert;
 import org.junit.Test;
 
-import static org.mockito.AdditionalAnswers.delegatesTo;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
 public class TestOSSInputFile extends AliyunOSSTestBase {
   private final OSS ossClient = ossClient().get();
   private final OSS ossMock = mock(OSS.class, delegatesTo(ossClient));
@@ -62,16 +61,21 @@ public class TestOSSInputFile extends AliyunOSSTestBase {
   @Test
   public void testOSSInputFile() {
     OSSURI uri = randomURI();
-    AssertHelpers.assertThrows("File length should not be negative", ValidationException.class,
+    AssertHelpers.assertThrows(
+        "File length should not be negative",
+        ValidationException.class,
         "Invalid file length",
-        () -> new OSSInputFile(ossClient().get(), uri, aliyunProperties, -1, MetricsContext.nullMetrics()));
+        () ->
+            new OSSInputFile(
+                ossClient().get(), uri, aliyunProperties, -1, MetricsContext.nullMetrics()));
   }
 
   @Test
   public void testExists() {
     OSSURI uri = randomURI();
 
-    InputFile inputFile = new OSSInputFile(ossMock, uri, aliyunProperties, MetricsContext.nullMetrics());
+    InputFile inputFile =
+        new OSSInputFile(ossMock, uri, aliyunProperties, MetricsContext.nullMetrics());
     Assert.assertFalse("OSS file should not exist", inputFile.exists());
     verify(ossMock, times(1)).getSimplifiedObjectMeta(uri.bucket(), uri.key());
     reset(ossMock);
@@ -104,7 +108,8 @@ public class TestOSSInputFile extends AliyunOSSTestBase {
   }
 
   private void readAndVerify(OSSURI uri, byte[] data) throws IOException {
-    InputFile inputFile = new OSSInputFile(ossClient().get(), uri, aliyunProperties, MetricsContext.nullMetrics());
+    InputFile inputFile =
+        new OSSInputFile(ossClient().get(), uri, aliyunProperties, MetricsContext.nullMetrics());
     Assert.assertTrue("OSS file should exist", inputFile.exists());
     Assert.assertEquals("Should have expected file length", data.length, inputFile.getLength());
 
@@ -118,9 +123,12 @@ public class TestOSSInputFile extends AliyunOSSTestBase {
   private void verifyLength(OSS ossClientMock, OSSURI uri, byte[] data, boolean isCache) {
     InputFile inputFile;
     if (isCache) {
-      inputFile = new OSSInputFile(ossClientMock, uri, aliyunProperties, data.length, MetricsContext.nullMetrics());
+      inputFile =
+          new OSSInputFile(
+              ossClientMock, uri, aliyunProperties, data.length, MetricsContext.nullMetrics());
     } else {
-      inputFile = new OSSInputFile(ossClientMock, uri, aliyunProperties, MetricsContext.nullMetrics());
+      inputFile =
+          new OSSInputFile(ossClientMock, uri, aliyunProperties, MetricsContext.nullMetrics());
     }
     inputFile.getLength();
     Assert.assertEquals("Should have expected file length", data.length, inputFile.getLength());
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputStream.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputStream.java
index 633efb48f1..49b9dbd354 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputStream.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSInputStream.java
@@ -16,9 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
+import static org.apache.iceberg.AssertHelpers.assertThrows;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.Arrays;
@@ -28,10 +31,6 @@ import org.apache.iceberg.io.SeekableInputStream;
 import org.apache.iceberg.relocated.com.google.common.io.ByteStreams;
 import org.junit.Test;
 
-import static org.apache.iceberg.AssertHelpers.assertThrows;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-
 public class TestOSSInputStream extends AliyunOSSTestBase {
   private final Random random = ThreadLocalRandom.current();
 
@@ -69,7 +68,8 @@ public class TestOSSInputStream extends AliyunOSSTestBase {
     }
   }
 
-  private void readAndCheck(SeekableInputStream in, long rangeStart, int size, byte[] original, boolean buffered)
+  private void readAndCheck(
+      SeekableInputStream in, long rangeStart, int size, byte[] original, boolean buffered)
       throws IOException {
     in.seek(rangeStart);
     assertEquals("Should have the correct position", rangeStart, in.getPos());
@@ -88,8 +88,10 @@ public class TestOSSInputStream extends AliyunOSSTestBase {
 
     assertEquals("Should have the correct position", rangeEnd, in.getPos());
 
-    assertArrayEquals("Should have expected range data",
-        Arrays.copyOfRange(original, (int) rangeStart, (int) rangeEnd), actual);
+    assertArrayEquals(
+        "Should have expected range data",
+        Arrays.copyOfRange(original, (int) rangeStart, (int) rangeEnd),
+        actual);
   }
 
   @Test
@@ -97,7 +99,9 @@ public class TestOSSInputStream extends AliyunOSSTestBase {
     OSSURI uri = new OSSURI(location("closed.dat"));
     SeekableInputStream closed = new OSSInputStream(ossClient().get(), uri);
     closed.close();
-    assertThrows("Cannot seek the input stream after closed.", IllegalStateException.class,
+    assertThrows(
+        "Cannot seek the input stream after closed.",
+        IllegalStateException.class,
         "Cannot seek: already closed",
         () -> {
           closed.seek(0);
@@ -116,8 +120,10 @@ public class TestOSSInputStream extends AliyunOSSTestBase {
       in.seek(expected.length / 2);
       byte[] actual = new byte[expected.length / 2];
       ByteStreams.readFully(in, actual);
-      assertArrayEquals("Should have expected seeking stream",
-          Arrays.copyOfRange(expected, expected.length / 2, expected.length), actual);
+      assertArrayEquals(
+          "Should have expected seeking stream",
+          Arrays.copyOfRange(expected, expected.length / 2, expected.length),
+          actual);
     }
   }
 
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputFile.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputFile.java
index 2ba900b577..5a63c9f552 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputFile.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputFile.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
 import com.aliyun.oss.OSS;
@@ -50,7 +49,8 @@ public class TestOSSOutputFile extends AliyunOSSTestBase {
     byte[] data = randomData(dataSize);
 
     OutputFile out = OSSOutputFile.fromLocation(ossClient, uri.location(), aliyunProperties);
-    try (OutputStream os = out.create(); InputStream is = new ByteArrayInputStream(data)) {
+    try (OutputStream os = out.create();
+        InputStream is = new ByteArrayInputStream(data)) {
       ByteStreams.copy(is, os);
     }
 
@@ -79,8 +79,11 @@ public class TestOSSOutputFile extends AliyunOSSTestBase {
     writeOSSData(uri, data);
 
     OutputFile out = OSSOutputFile.fromLocation(ossClient, uri.location(), aliyunProperties);
-    AssertHelpers.assertThrows("Should complain about location already exists",
-        AlreadyExistsException.class, "Location already exists", out::create);
+    AssertHelpers.assertThrows(
+        "Should complain about location already exists",
+        AlreadyExistsException.class,
+        "Location already exists",
+        out::create);
   }
 
   @Test
@@ -95,12 +98,15 @@ public class TestOSSOutputFile extends AliyunOSSTestBase {
     byte[] expect = randomData(expectSize);
 
     OutputFile out = OSSOutputFile.fromLocation(ossClient, uri.location(), aliyunProperties);
-    try (OutputStream os = out.createOrOverwrite(); InputStream is = new ByteArrayInputStream(expect)) {
+    try (OutputStream os = out.createOrOverwrite();
+        InputStream is = new ByteArrayInputStream(expect)) {
       ByteStreams.copy(is, os);
     }
 
-    Assert.assertEquals(String.format("Should overwrite object length from %d to %d", dataSize, expectSize),
-        expectSize, ossDataLength(uri));
+    Assert.assertEquals(
+        String.format("Should overwrite object length from %d to %d", dataSize, expectSize),
+        expectSize,
+        ossDataLength(uri));
 
     byte[] actual = ossDataContent(uri, expectSize);
     Assert.assertArrayEquals("Should overwrite object content", expect, actual);
@@ -109,7 +115,8 @@ public class TestOSSOutputFile extends AliyunOSSTestBase {
   @Test
   public void testLocation() {
     OSSURI uri = randomURI();
-    OutputFile out = new OSSOutputFile(ossClient, uri, aliyunProperties, MetricsContext.nullMetrics());
+    OutputFile out =
+        new OSSOutputFile(ossClient, uri, aliyunProperties, MetricsContext.nullMetrics());
     Assert.assertEquals("Location should match", uri.location(), out.location());
   }
 
@@ -118,8 +125,10 @@ public class TestOSSOutputFile extends AliyunOSSTestBase {
     int dataSize = 1024 * 10;
     byte[] data = randomData(dataSize);
 
-    OutputFile out = new OSSOutputFile(ossClient, randomURI(), aliyunProperties, MetricsContext.nullMetrics());
-    try (OutputStream os = out.create(); InputStream is = new ByteArrayInputStream(data)) {
+    OutputFile out =
+        new OSSOutputFile(ossClient, randomURI(), aliyunProperties, MetricsContext.nullMetrics());
+    try (OutputStream os = out.create();
+        InputStream is = new ByteArrayInputStream(data)) {
       ByteStreams.copy(is, os);
     }
 
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputStream.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputStream.java
index 0567cc91b5..9fa7a648f8 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputStream.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSOutputStream.java
@@ -16,9 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
+import static org.mockito.AdditionalAnswers.delegatesTo;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 import com.aliyun.oss.OSS;
 import java.io.IOException;
 import java.io.InputStream;
@@ -37,13 +43,6 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.mockito.AdditionalAnswers.delegatesTo;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
 public class TestOSSOutputStream extends AliyunOSSTestBase {
   private static final Logger LOG = LoggerFactory.getLogger(TestOSSOutputStream.class);
 
@@ -53,12 +52,11 @@ public class TestOSSOutputStream extends AliyunOSSTestBase {
   private final Path tmpDir = Files.createTempDirectory("oss-file-io-test-");
   private static final Random random = ThreadLocalRandom.current();
 
-  private final AliyunProperties props = new AliyunProperties(ImmutableMap.of(
-      AliyunProperties.OSS_STAGING_DIRECTORY, tmpDir.toString()
-  ));
+  private final AliyunProperties props =
+      new AliyunProperties(
+          ImmutableMap.of(AliyunProperties.OSS_STAGING_DIRECTORY, tmpDir.toString()));
 
-  public TestOSSOutputStream() throws IOException {
-  }
+  public TestOSSOutputStream() throws IOException {}
 
   @Test
   public void testWrite() throws IOException {
@@ -80,10 +78,14 @@ public class TestOSSOutputStream extends AliyunOSSTestBase {
 
   private void writeAndVerify(OSS mock, OSSURI uri, byte[] data, boolean arrayWrite)
       throws IOException {
-    LOG.info("Write and verify for arguments uri: {}, data length: {}, arrayWrite: {}",
-            uri, data.length, arrayWrite);
-
-    try (OSSOutputStream out = new OSSOutputStream(mock, uri, props, MetricsContext.nullMetrics())) {
+    LOG.info(
+        "Write and verify for arguments uri: {}, data length: {}, arrayWrite: {}",
+        uri,
+        data.length,
+        arrayWrite);
+
+    try (OSSOutputStream out =
+        new OSSOutputStream(mock, uri, props, MetricsContext.nullMetrics())) {
       if (arrayWrite) {
         out.write(data);
         Assert.assertEquals("OSSOutputStream position", data.length, out.getPos());
@@ -95,16 +97,21 @@ public class TestOSSOutputStream extends AliyunOSSTestBase {
       }
     }
 
-    Assert.assertTrue("OSS object should exist", ossClient.doesObjectExist(uri.bucket(), uri.key()));
-    Assert.assertEquals("Object length",
-        ossClient.getObject(uri.bucket(), uri.key()).getObjectMetadata().getContentLength(), data.length);
+    Assert.assertTrue(
+        "OSS object should exist", ossClient.doesObjectExist(uri.bucket(), uri.key()));
+    Assert.assertEquals(
+        "Object length",
+        ossClient.getObject(uri.bucket(), uri.key()).getObjectMetadata().getContentLength(),
+        data.length);
 
     byte[] actual = ossDataContent(uri, data.length);
     Assert.assertArrayEquals("Object content", data, actual);
 
     // Verify all staging files are cleaned up.
-    Assert.assertEquals("Staging files should clean up",
-        0, Files.list(Paths.get(props.ossStagingDirectory())).count());
+    Assert.assertEquals(
+        "Staging files should clean up",
+        0,
+        Files.list(Paths.get(props.ossStagingDirectory())).count());
   }
 
   private OSSURI randomURI() {
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSURI.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSURI.java
index f76383d0c0..3621151b64 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSURI.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/TestOSSURI.java
@@ -16,17 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss;
 
+import static com.aliyun.oss.internal.OSSUtils.OSS_RESOURCE_MANAGER;
+
 import org.apache.iceberg.AssertHelpers;
 import org.apache.iceberg.exceptions.ValidationException;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.junit.Assert;
 import org.junit.Test;
 
-import static com.aliyun.oss.internal.OSSUtils.OSS_RESOURCE_MANAGER;
-
 public class TestOSSURI {
   @Test
   public void testUrlParsing() {
@@ -50,34 +49,47 @@ public class TestOSSURI {
 
   @Test
   public void invalidBucket() {
-    AssertHelpers.assertThrows("Invalid bucket", IllegalArgumentException.class,
+    AssertHelpers.assertThrows(
+        "Invalid bucket",
+        IllegalArgumentException.class,
         OSS_RESOURCE_MANAGER.getFormattedString("BucketNameInvalid", "test_bucket"),
         () -> new OSSURI("https://test_bucket/path/to/file"));
   }
 
   @Test
   public void missingKey() {
-    AssertHelpers.assertThrows("Missing key", ValidationException.class,
-        "Missing key in OSS location", () -> new OSSURI("https://bucket/"));
+    AssertHelpers.assertThrows(
+        "Missing key",
+        ValidationException.class,
+        "Missing key in OSS location",
+        () -> new OSSURI("https://bucket/"));
   }
 
   @Test
   public void invalidKey() {
-    AssertHelpers.assertThrows("Invalid key", IllegalArgumentException.class,
+    AssertHelpers.assertThrows(
+        "Invalid key",
+        IllegalArgumentException.class,
         OSS_RESOURCE_MANAGER.getFormattedString("ObjectKeyInvalid", "\\path/to/file"),
         () -> new OSSURI("https://bucket/\\path/to/file"));
   }
 
   @Test
   public void relativePathing() {
-    AssertHelpers.assertThrows("Cannot use relative oss location.", ValidationException.class,
-        "Invalid OSS location", () -> new OSSURI("/path/to/file"));
+    AssertHelpers.assertThrows(
+        "Cannot use relative oss location.",
+        ValidationException.class,
+        "Invalid OSS location",
+        () -> new OSSURI("/path/to/file"));
   }
 
   @Test
   public void invalidScheme() {
-    AssertHelpers.assertThrows("Only support scheme: oss/https", ValidationException.class,
-        "Invalid scheme", () -> new OSSURI("invalid://bucket/"));
+    AssertHelpers.assertThrows(
+        "Only support scheme: oss/https",
+        ValidationException.class,
+        "Invalid scheme",
+        () -> new OSSURI("invalid://bucket/"));
   }
 
   @Test
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockApp.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockApp.java
index 81e2e91156..ea0ef0fe4d 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockApp.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockApp.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss.mock;
 
 import java.util.List;
@@ -44,9 +43,11 @@ import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
 
 @SuppressWarnings("checkstyle:AnnotationUseStyle")
 @Configuration
-@EnableAutoConfiguration(exclude = {SecurityAutoConfiguration.class}, excludeName = {
-    "org.springframework.boot.actuate.autoconfigure.security.servlet.ManagementWebSecurityAutoConfiguration"
-})
+@EnableAutoConfiguration(
+    exclude = {SecurityAutoConfiguration.class},
+    excludeName = {
+      "org.springframework.boot.actuate.autoconfigure.security.servlet.ManagementWebSecurityAutoConfiguration"
+    })
 @ComponentScan
 public class AliyunOSSMockApp {
 
@@ -57,8 +58,7 @@ public class AliyunOSSMockApp {
 
   static final String PROP_SILENT = "silent";
 
-  @Autowired
-  private ConfigurableApplicationContext context;
+  @Autowired private ConfigurableApplicationContext context;
 
   public static AliyunOSSMockApp start(Map<String, Object> properties, String... args) {
     Map<String, Object> defaults = Maps.newHashMap();
@@ -105,7 +105,8 @@ public class AliyunOSSMockApp {
       mediaTypes.add(MediaType.APPLICATION_FORM_URLENCODED);
       mediaTypes.add(MediaType.APPLICATION_OCTET_STREAM);
 
-      final MappingJackson2XmlHttpMessageConverter xmlConverter = new MappingJackson2XmlHttpMessageConverter();
+      final MappingJackson2XmlHttpMessageConverter xmlConverter =
+          new MappingJackson2XmlHttpMessageConverter();
       xmlConverter.setSupportedMediaTypes(mediaTypes);
 
       return xmlConverter;
@@ -114,7 +115,8 @@ public class AliyunOSSMockApp {
 
   private static class RangeConverter implements Converter<String, Range> {
 
-    private static final Pattern REQUESTED_RANGE_PATTERN = Pattern.compile("^bytes=((\\d*)-(\\d*))((,\\d*-\\d*)*)");
+    private static final Pattern REQUESTED_RANGE_PATTERN =
+        Pattern.compile("^bytes=((\\d*)-(\\d*))((,\\d*-\\d*)*)");
 
     @Override
     public Range convert(String rangeString) {
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java
index 1c8539b0f6..0cc76825c2 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java
@@ -16,9 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss.mock;
 
+import static org.springframework.http.HttpStatus.INTERNAL_SERVER_ERROR;
+import static org.springframework.http.HttpStatus.OK;
+import static org.springframework.http.HttpStatus.PARTIAL_CONTENT;
+import static org.springframework.http.HttpStatus.REQUESTED_RANGE_NOT_SATISFIABLE;
+
 import com.aliyun.oss.OSSErrorCode;
 import com.aliyun.oss.model.Bucket;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -47,17 +51,11 @@ import org.springframework.web.bind.annotation.RequestMethod;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler;
 
-import static org.springframework.http.HttpStatus.INTERNAL_SERVER_ERROR;
-import static org.springframework.http.HttpStatus.OK;
-import static org.springframework.http.HttpStatus.PARTIAL_CONTENT;
-import static org.springframework.http.HttpStatus.REQUESTED_RANGE_NOT_SATISFIABLE;
-
 @RestController
 public class AliyunOSSMockLocalController {
   private static final Logger LOG = LoggerFactory.getLogger(AliyunOSSMockLocalController.class);
 
-  @Autowired
-  private AliyunOSSMockLocalStore localStore;
+  @Autowired private AliyunOSSMockLocalStore localStore;
 
   private static String filenameFrom(@PathVariable String bucketName, HttpServletRequest request) {
     String requestUri = request.getRequestURI();
@@ -67,13 +65,17 @@ public class AliyunOSSMockLocalController {
   @RequestMapping(value = "/{bucketName}", method = RequestMethod.PUT, produces = "application/xml")
   public void putBucket(@PathVariable String bucketName) throws IOException {
     if (localStore.getBucket(bucketName) != null) {
-      throw new OssException(409, OSSErrorCode.BUCKET_ALREADY_EXISTS, bucketName + " already exists.");
+      throw new OssException(
+          409, OSSErrorCode.BUCKET_ALREADY_EXISTS, bucketName + " already exists.");
     }
 
     localStore.createBucket(bucketName);
   }
 
-  @RequestMapping(value = "/{bucketName}", method = RequestMethod.DELETE, produces = "application/xml")
+  @RequestMapping(
+      value = "/{bucketName}",
+      method = RequestMethod.DELETE,
+      produces = "application/xml")
   public void deleteBucket(@PathVariable String bucketName) throws IOException {
     verifyBucketExistence(bucketName);
 
@@ -81,17 +83,19 @@ public class AliyunOSSMockLocalController {
   }
 
   @RequestMapping(value = "/{bucketName:.+}/**", method = RequestMethod.PUT)
-  public ResponseEntity<String> putObject(@PathVariable String bucketName, HttpServletRequest request) {
+  public ResponseEntity<String> putObject(
+      @PathVariable String bucketName, HttpServletRequest request) {
     verifyBucketExistence(bucketName);
     String filename = filenameFrom(bucketName, request);
     try (ServletInputStream inputStream = request.getInputStream()) {
-      ObjectMetadata metadata = localStore.putObject(
-          bucketName,
-          filename,
-          inputStream,
-          request.getContentType(),
-          request.getHeader(HttpHeaders.CONTENT_ENCODING),
-          ImmutableMap.of());
+      ObjectMetadata metadata =
+          localStore.putObject(
+              bucketName,
+              filename,
+              inputStream,
+              request.getContentType(),
+              request.getHeader(HttpHeaders.CONTENT_ENCODING),
+              ImmutableMap.of());
 
       HttpHeaders responseHeaders = new HttpHeaders();
       responseHeaders.setETag("\"" + metadata.getContentMD5() + "\"");
@@ -112,7 +116,8 @@ public class AliyunOSSMockLocalController {
   }
 
   @RequestMapping(value = "/{bucketName:.+}/**", method = RequestMethod.HEAD)
-  public ResponseEntity<String> getObjectMeta(@PathVariable String bucketName, HttpServletRequest request) {
+  public ResponseEntity<String> getObjectMeta(
+      @PathVariable String bucketName, HttpServletRequest request) {
     verifyBucketExistence(bucketName);
     ObjectMetadata metadata = verifyObjectExistence(bucketName, filenameFrom(bucketName, request));
 
@@ -133,7 +138,8 @@ public class AliyunOSSMockLocalController {
       @PathVariable String bucketName,
       @RequestHeader(value = "Range", required = false) Range range,
       HttpServletRequest request,
-      HttpServletResponse response) throws IOException {
+      HttpServletResponse response)
+      throws IOException {
     verifyBucketExistence(bucketName);
 
     String filename = filenameFrom(bucketName, request);
@@ -158,8 +164,11 @@ public class AliyunOSSMockLocalController {
 
       response.setStatus(PARTIAL_CONTENT.value());
       response.setHeader(HttpHeaders.ACCEPT_RANGES, "bytes");
-      response.setHeader(HttpHeaders.CONTENT_RANGE, String.format("bytes %s-%s/%s",
-          range.start(), bytesToRead + range.start() + 1, metadata.getContentLength()));
+      response.setHeader(
+          HttpHeaders.CONTENT_RANGE,
+          String.format(
+              "bytes %s-%s/%s",
+              range.start(), bytesToRead + range.start() + 1, metadata.getContentLength()));
       response.setHeader(HttpHeaders.ETAG, "\"" + metadata.getContentMD5() + "\"");
       response.setDateHeader(HttpHeaders.LAST_MODIFIED, metadata.getLastModificationDate());
       response.setContentType(metadata.getContentType());
@@ -189,7 +198,8 @@ public class AliyunOSSMockLocalController {
   private void verifyBucketExistence(String bucketName) {
     Bucket bucket = localStore.getBucket(bucketName);
     if (bucket == null) {
-      throw new OssException(404, OSSErrorCode.NO_SUCH_BUCKET, "The specified bucket does not exist. ");
+      throw new OssException(
+          404, OSSErrorCode.NO_SUCH_BUCKET, "The specified bucket does not exist. ");
     }
   }
 
@@ -198,7 +208,8 @@ public class AliyunOSSMockLocalController {
     try {
       objectMetadata = localStore.getObjectMetadata(bucketName, filename);
     } catch (IOException e) {
-      LOG.error("Failed to get the object metadata, bucket: {}, object: {}.", bucketName, filename, e);
+      LOG.error(
+          "Failed to get the object metadata, bucket: {}, object: {}.", bucketName, filename, e);
     }
 
     if (objectMetadata == null) {
@@ -222,9 +233,7 @@ public class AliyunOSSMockLocalController {
       HttpHeaders headers = new HttpHeaders();
       headers.setContentType(MediaType.APPLICATION_XML);
 
-      return ResponseEntity.status(ex.status)
-          .headers(headers)
-          .body(errorResponse);
+      return ResponseEntity.status(ex.status).headers(headers).body(errorResponse);
     }
   }
 
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalStore.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalStore.java
index 22bf1dd18e..75766a6714 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalStore.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalStore.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss.mock;
 
 import com.aliyun.oss.OSSErrorCode;
@@ -59,7 +58,8 @@ public class AliyunOSSMockLocalStore {
 
   private final ObjectMapper objectMapper = new ObjectMapper();
 
-  public AliyunOSSMockLocalStore(@Value("${" + AliyunOSSMockApp.PROP_ROOT_DIR + ":}") String rootDir) {
+  public AliyunOSSMockLocalStore(
+      @Value("${" + AliyunOSSMockApp.PROP_ROOT_DIR + ":}") String rootDir) {
     Preconditions.checkNotNull(rootDir, "Root directory cannot be null");
     this.root = new File(rootDir);
 
@@ -92,7 +92,8 @@ public class AliyunOSSMockLocalStore {
     return new String(Hex.encodeHex(md.digest())).toUpperCase(Locale.ROOT);
   }
 
-  private static void inputStreamToFile(InputStream inputStream, File targetFile) throws IOException {
+  private static void inputStreamToFile(InputStream inputStream, File targetFile)
+      throws IOException {
     try (OutputStream outputStream = new FileOutputStream(targetFile)) {
       ByteStreams.copy(inputStream, outputStream);
     }
@@ -104,8 +105,9 @@ public class AliyunOSSMockLocalStore {
   }
 
   Bucket getBucket(String bucketName) {
-    List<Bucket> buckets = findBucketsByFilter(file ->
-        Files.isDirectory(file) && file.getFileName().endsWith(bucketName));
+    List<Bucket> buckets =
+        findBucketsByFilter(
+            file -> Files.isDirectory(file) && file.getFileName().endsWith(bucketName));
 
     return buckets.size() > 0 ? buckets.get(0) : null;
   }
@@ -116,8 +118,8 @@ public class AliyunOSSMockLocalStore {
 
     File dir = new File(root, bucket.getName());
     if (Files.walk(dir.toPath()).anyMatch(p -> p.toFile().isFile())) {
-      throw new AliyunOSSMockLocalController.OssException(409, OSSErrorCode.BUCKET_NOT_EMPTY,
-          "The bucket you tried to delete is not empty. ");
+      throw new AliyunOSSMockLocalController.OssException(
+          409, OSSErrorCode.BUCKET_NOT_EMPTY, "The bucket you tried to delete is not empty. ");
     }
 
     FileUtils.deleteDirectory(dir);
@@ -129,7 +131,8 @@ public class AliyunOSSMockLocalStore {
       InputStream dataStream,
       String contentType,
       String contentEncoding,
-      Map<String, String> userMetaData) throws IOException {
+      Map<String, String> userMetaData)
+      throws IOException {
     File bucketDir = new File(root, bucketName);
     assert bucketDir.exists() || bucketDir.mkdirs();
 
@@ -145,12 +148,14 @@ public class AliyunOSSMockLocalStore {
     ObjectMetadata metadata = new ObjectMetadata();
     metadata.setContentLength(dataFile.length());
     metadata.setContentMD5(md5sum(dataFile.getAbsolutePath()));
-    metadata.setContentType(contentType != null ? contentType : MediaType.APPLICATION_OCTET_STREAM_VALUE);
+    metadata.setContentType(
+        contentType != null ? contentType : MediaType.APPLICATION_OCTET_STREAM_VALUE);
     metadata.setContentEncoding(contentEncoding);
     metadata.setDataFile(dataFile.getAbsolutePath());
     metadata.setMetaFile(metaFile.getAbsolutePath());
 
-    BasicFileAttributes attributes = Files.readAttributes(dataFile.toPath(), BasicFileAttributes.class);
+    BasicFileAttributes attributes =
+        Files.readAttributes(dataFile.toPath(), BasicFileAttributes.class);
     metadata.setLastModificationDate(attributes.lastModifiedTime().toMillis());
 
     metadata.setUserMetaData(userMetaData);
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockRule.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockRule.java
index b0f3785692..12c1e0128d 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockRule.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockRule.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss.mock;
 
 import com.aliyun.oss.OSS;
@@ -63,9 +62,11 @@ public class AliyunOSSMockRule implements AliyunOSSTestRule {
 
   @Override
   public OSS createOSSClient() {
-    String endpoint = String.format("http://localhost:%s", properties.getOrDefault(
-        AliyunOSSMockApp.PROP_HTTP_PORT,
-        AliyunOSSMockApp.PORT_HTTP_PORT_DEFAULT));
+    String endpoint =
+        String.format(
+            "http://localhost:%s",
+            properties.getOrDefault(
+                AliyunOSSMockApp.PROP_HTTP_PORT, AliyunOSSMockApp.PORT_HTTP_PORT_DEFAULT));
     return new OSSClientBuilder().build(endpoint, "foo", "bar");
   }
 
@@ -85,13 +86,14 @@ public class AliyunOSSMockRule implements AliyunOSSTestRule {
     try {
       Files.walk(rootDir().toPath())
           .filter(p -> p.toFile().isFile())
-          .forEach(p -> {
-            try {
-              Files.delete(p);
-            } catch (IOException e) {
-              // delete this file quietly.
-            }
-          });
+          .forEach(
+              p -> {
+                try {
+                  Files.delete(p);
+                } catch (IOException e) {
+                  // delete this file quietly.
+                }
+              });
 
       createOSSClient().deleteBucket(bucket);
     } catch (IOException e) {
@@ -110,7 +112,9 @@ public class AliyunOSSMockRule implements AliyunOSSTestRule {
     public AliyunOSSTestRule build() {
       String rootDir = (String) props.get(AliyunOSSMockApp.PROP_ROOT_DIR);
       if (Strings.isNullOrEmpty(rootDir)) {
-        File dir = new File(FileUtils.getTempDirectory(), "oss-mock-file-store-" + System.currentTimeMillis());
+        File dir =
+            new File(
+                FileUtils.getTempDirectory(), "oss-mock-file-store-" + System.currentTimeMillis());
         rootDir = dir.getAbsolutePath();
         props.put(AliyunOSSMockApp.PROP_ROOT_DIR, rootDir);
       }
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/ObjectMetadata.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/ObjectMetadata.java
index 95fbd01988..5c38f61e9d 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/ObjectMetadata.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/ObjectMetadata.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss.mock;
 
 import java.util.Map;
@@ -40,7 +39,8 @@ public class ObjectMetadata {
 
   private String metaFile;
 
-  // The following getters and setters are required for Jackson ObjectMapper serialization and deserialization.
+  // The following getters and setters are required for Jackson ObjectMapper serialization and
+  // deserialization.
 
   public long getContentLength() {
     return contentLength;
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/Range.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/Range.java
index dcf1291b95..ff66e5c2a1 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/Range.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/Range.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss.mock;
 
 public class Range {
diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/TestLocalAliyunOSS.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/TestLocalAliyunOSS.java
index a2849f256d..b9acc226cc 100644
--- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/TestLocalAliyunOSS.java
+++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/TestLocalAliyunOSS.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg.aliyun.oss.mock;
 
 import com.aliyun.oss.OSS;
@@ -42,8 +41,7 @@ import org.junit.Test;
 
 public class TestLocalAliyunOSS {
 
-  @ClassRule
-  public static final AliyunOSSTestRule OSS_TEST_RULE = TestUtility.initialize();
+  @ClassRule public static final AliyunOSSTestRule OSS_TEST_RULE = TestUtility.initialize();
 
   private final OSS oss = OSS_TEST_RULE.createOSSClient();
   private final String bucketName = OSS_TEST_RULE.testBucketName();
@@ -70,7 +68,8 @@ public class TestLocalAliyunOSS {
 
   @Test
   public void testBuckets() {
-    Assume.assumeTrue("Aliyun integration test cannot delete existing bucket from test environment.",
+    Assume.assumeTrue(
+        "Aliyun integration test cannot delete existing bucket from test environment.",
         OSS_TEST_RULE.getClass() == AliyunOSSMockRule.class);
 
     Assert.assertTrue(doesBucketExist(bucketName));
@@ -85,7 +84,8 @@ public class TestLocalAliyunOSS {
 
   @Test
   public void testDeleteBucket() {
-    Assume.assumeTrue("Aliyun integration test cannot delete existing bucket from test environment.",
+    Assume.assumeTrue(
+        "Aliyun integration test cannot delete existing bucket from test environment.",
         OSS_TEST_RULE.getClass() == AliyunOSSMockRule.class);
 
     String bucketNotExist = String.format("bucket-not-existing-%s", UUID.randomUUID());
@@ -116,7 +116,8 @@ public class TestLocalAliyunOSS {
     random.nextBytes(bytes);
 
     String bucketNotExist = String.format("bucket-not-existing-%s", UUID.randomUUID());
-    assertThrows(() -> oss.putObject(bucketNotExist, "object", wrap(bytes)), OSSErrorCode.NO_SUCH_BUCKET);
+    assertThrows(
+        () -> oss.putObject(bucketNotExist, "object", wrap(bytes)), OSSErrorCode.NO_SUCH_BUCKET);
 
     PutObjectResult result = oss.putObject(bucketName, "object", wrap(bytes));
     Assert.assertEquals(AliyunOSSMockLocalStore.md5sum(wrap(bytes)), result.getETag());
diff --git a/api/src/main/java/org/apache/iceberg/Accessor.java b/api/src/main/java/org/apache/iceberg/Accessor.java
index 37f9f14a1f..2a20a04df9 100644
--- a/api/src/main/java/org/apache/iceberg/Accessor.java
+++ b/api/src/main/java/org/apache/iceberg/Accessor.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
diff --git a/api/src/main/java/org/apache/iceberg/Accessors.java b/api/src/main/java/org/apache/iceberg/Accessors.java
index e18cf32725..08233624f2 100644
--- a/api/src/main/java/org/apache/iceberg/Accessors.java
+++ b/api/src/main/java/org/apache/iceberg/Accessors.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
@@ -28,21 +27,22 @@ import org.apache.iceberg.types.Types;
 
 /**
  * Position2Accessor and Position3Accessor here are an optimization. For a nested schema like:
+ *
  * <pre>
  * root
  *  |-- a: struct (nullable = false)
  *  |    |-- b: struct (nullable = false)
  *  |        | -- c: string (containsNull = false)
  * </pre>
- *  Then we will use Position3Accessor to access nested field 'c'. It can be accessed like this:
- *  {@code row.get(p0, StructLike.class).get(p1, StructLike.class).get(p2, javaClass)}.
- *  Commonly, Nested fields with depth=1 or 2 or 3 are the fields that will be accessed frequently,
- *  so this optimization will help to access this kind of schema. For schema whose depth is deeper than 3,
- *  then we will use the {@link WrappedPositionAccessor} to access recursively.
+ *
+ * Then we will use Position3Accessor to access nested field 'c'. It can be accessed like this:
+ * {@code row.get(p0, StructLike.class).get(p1, StructLike.class).get(p2, javaClass)}. Commonly,
+ * nested fields with depth 1, 2, or 3 are the fields that are accessed most frequently, so this
+ * optimization helps when accessing this kind of schema. For schemas deeper than three levels, we
+ * use the {@link WrappedPositionAccessor} to access fields recursively.
  */
 public class Accessors {
-  private Accessors() {
-  }
+  private Accessors() {}
 
   public static Integer toPosition(Accessor<StructLike> accessor) {
     if (accessor instanceof PositionAccessor) {
@@ -187,8 +187,8 @@ public class Accessors {
     return new PositionAccessor(pos, type);
   }
 
-  private static Accessor<StructLike> newAccessor(int pos, boolean isOptional,
-                                                  Accessor<StructLike> accessor) {
+  private static Accessor<StructLike> newAccessor(
+      int pos, boolean isOptional, Accessor<StructLike> accessor) {
     if (isOptional) {
       // the wrapped position handles null layers
       return new WrappedPositionAccessor(pos, accessor);
@@ -201,7 +201,8 @@ public class Accessors {
     }
   }
 
-  private static class BuildPositionAccessors extends TypeUtil.SchemaVisitor<Map<Integer, Accessor<StructLike>>> {
+  private static class BuildPositionAccessors
+      extends TypeUtil.SchemaVisitor<Map<Integer, Accessor<StructLike>>> {
 
     @Override
     public Map<Integer, Accessor<StructLike>> schema(
diff --git a/api/src/main/java/org/apache/iceberg/AddedRowsScanTask.java b/api/src/main/java/org/apache/iceberg/AddedRowsScanTask.java
index ba7f730472..9cee9d942c 100644
--- a/api/src/main/java/org/apache/iceberg/AddedRowsScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/AddedRowsScanTask.java
@@ -16,27 +16,29 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
 
 /**
  * A scan task for inserts generated by adding a data file to the table.
- * <p>
- * Note that added data files may have matching delete files. This may happen if a matching position
- * delete file is committed in the same snapshot or if changes for multiple snapshots are squashed together.
- * <p>
- * Suppose snapshot S1 adds data files F1, F2, F3 and a position delete file, D1, that marks particular
- * records in F1 as deleted. A scan for changes generated by S1 should include the following tasks:
+ *
+ * <p>Note that added data files may have matching delete files. This may happen if a matching
+ * position delete file is committed in the same snapshot or if changes for multiple snapshots are
+ * squashed together.
+ *
+ * <p>Suppose snapshot S1 adds data files F1, F2, F3 and a position delete file, D1, that marks
+ * particular records in F1 as deleted. A scan for changes generated by S1 should include the
+ * following tasks:
+ *
  * <ul>
- *   <li>AddedRowsScanTask(file=F1, deletes=[D1], snapshot=S1)</li>
- *   <li>AddedRowsScanTask(file=F2, deletes=[], snapshot=S1)</li>
- *   <li>AddedRowsScanTask(file=F3, deletes=[], snapshot=S1)</li>
+ *   <li>AddedRowsScanTask(file=F1, deletes=[D1], snapshot=S1)
+ *   <li>AddedRowsScanTask(file=F2, deletes=[], snapshot=S1)
+ *   <li>AddedRowsScanTask(file=F3, deletes=[], snapshot=S1)
  * </ul>
- * <p>
- * Readers consuming these tasks should produce added records with metadata like change ordinal and
- * commit snapshot ID.
+ *
+ * <p>Readers consuming these tasks should produce added records with metadata like change ordinal
+ * and commit snapshot ID.
  */
 public interface AddedRowsScanTask extends ChangelogScanTask, ContentScanTask<DataFile> {
   /**
diff --git a/api/src/main/java/org/apache/iceberg/AppendFiles.java b/api/src/main/java/org/apache/iceberg/AppendFiles.java
index aefe1d1fdd..1fc249acf6 100644
--- a/api/src/main/java/org/apache/iceberg/AppendFiles.java
+++ b/api/src/main/java/org/apache/iceberg/AppendFiles.java
@@ -16,16 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 /**
  * API for appending new files in a table.
- * <p>
- * This API accumulates file additions, produces a new {@link Snapshot} of the table, and commits
+ *
+ * <p>This API accumulates file additions, produces a new {@link Snapshot} of the table, and commits
  * that snapshot as the current.
- * <p>
- * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
+ *
+ * <p>When committing, these changes will be applied to the latest table snapshot. Commit conflicts
  * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
  */
 public interface AppendFiles extends SnapshotUpdate<AppendFiles> {
@@ -39,20 +38,20 @@ public interface AppendFiles extends SnapshotUpdate<AppendFiles> {
 
   /**
    * Append a {@link ManifestFile} to the table.
-   * <p>
-   * The manifest must contain only appended files. All files in the manifest will be appended to
+   *
+   * <p>The manifest must contain only appended files. All files in the manifest will be appended to
    * the table in the snapshot created by this update.
-   * <p>
-   * By default, the manifest will be rewritten to assign all entries this update's snapshot ID.
-   * In that case, it is always the responsibility of the caller to manage the lifecycle of
-   * the original manifest.
-   * <p>
-   * If manifest entries are allowed to inherit the snapshot ID assigned on commit, the manifest
+   *
+   * <p>By default, the manifest will be rewritten to assign all entries this update's snapshot ID.
+   * In that case, it is always the responsibility of the caller to manage the lifecycle of the
+   * original manifest.
+   *
+   * <p>If manifest entries are allowed to inherit the snapshot ID assigned on commit, the manifest
    * should never be deleted manually if the commit succeeds as it will become part of the table
    * metadata and will be cleaned up on expiry. If the manifest gets merged with others while
-   * preparing a new snapshot, it will be deleted automatically if this operation is successful.
-   * If the commit fails, the manifest will never be deleted and it is up to the caller whether
-   * to delete or reuse it.
+   * preparing a new snapshot, it will be deleted automatically if this operation is successful. If
+   * the commit fails, the manifest will never be deleted and it is up to the caller whether to
+   * delete or reuse it.
    *
    * @param file a manifest file
    * @return this for method chaining
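
As an illustrative usage sketch of the AppendFiles API documented above (not
part of this commit's diff; it assumes an existing Table handle named `table`
and an already-built DataFile named `dataFile`):

    // org.apache.iceberg.Table, org.apache.iceberg.DataFile
    table.newAppend()          // start an AppendFiles operation
        .appendFile(dataFile)  // accumulate one or more file additions
        .commit();             // produce a new Snapshot and commit it as current
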
diff --git a/api/src/main/java/org/apache/iceberg/BaseScanTaskGroup.java b/api/src/main/java/org/apache/iceberg/BaseScanTaskGroup.java
index 1d85077bca..706ca344e6 100644
--- a/api/src/main/java/org/apache/iceberg/BaseScanTaskGroup.java
+++ b/api/src/main/java/org/apache/iceberg/BaseScanTaskGroup.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Collection;
@@ -41,7 +40,8 @@ public class BaseScanTaskGroup<T extends ScanTask> implements ScanTaskGroup<T> {
     if (taskList == null) {
       synchronized (this) {
         if (taskList == null) {
-          ImmutableList.Builder<T> listBuilder = ImmutableList.builderWithExpectedSize(tasks.length);
+          ImmutableList.Builder<T> listBuilder =
+              ImmutableList.builderWithExpectedSize(tasks.length);
           for (Object task : tasks) {
             listBuilder.add((T) task);
           }
@@ -55,8 +55,6 @@ public class BaseScanTaskGroup<T extends ScanTask> implements ScanTaskGroup<T> {
 
   @Override
   public String toString() {
-    return MoreObjects.toStringHelper(this)
-        .add("tasks", Joiner.on(", ").join(tasks))
-        .toString();
+    return MoreObjects.toStringHelper(this).add("tasks", Joiner.on(", ").join(tasks)).toString();
   }
 }
diff --git a/api/src/main/java/org/apache/iceberg/ChangelogOperation.java b/api/src/main/java/org/apache/iceberg/ChangelogOperation.java
index 3d6ad72c90..5a7c86d1eb 100644
--- a/api/src/main/java/org/apache/iceberg/ChangelogOperation.java
+++ b/api/src/main/java/org/apache/iceberg/ChangelogOperation.java
@@ -16,12 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
-/**
- * An enum representing possible operations in a changelog.
- */
+/** An enum representing possible operations in a changelog. */
 public enum ChangelogOperation {
-  INSERT, DELETE
+  INSERT,
+  DELETE
 }
diff --git a/api/src/main/java/org/apache/iceberg/ChangelogScanTask.java b/api/src/main/java/org/apache/iceberg/ChangelogScanTask.java
index f74fea7478..2de17fc0c7 100644
--- a/api/src/main/java/org/apache/iceberg/ChangelogScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/ChangelogScanTask.java
@@ -16,26 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
-/**
- * A changelog scan task.
- */
+/** A changelog scan task. */
 public interface ChangelogScanTask extends ScanTask {
-  /**
-   * Returns the type of changes produced by this task (i.e. insert/delete).
-   */
+  /** Returns the type of changes produced by this task (i.e. insert/delete). */
   ChangelogOperation operation();
 
   /**
    * Returns the ordinal of changes produced by this task. This number indicates the order in which
-   * changes produced by this scan must be applied. Operations with a lower ordinal must be applied first.
+   * changes produced by this scan must be applied. Operations with a lower ordinal must be applied
+   * first.
    */
   int changeOrdinal();
 
-  /**
-   * Returns the snapshot ID in which the changes were committed.
-   */
+  /** Returns the snapshot ID in which the changes were committed. */
   long commitSnapshotId();
 }
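
A hedged sketch of how a reader might dispatch on these tasks, using only the
interface methods above (the surrounding plumbing is assumed):

    void process(ChangelogScanTask task) {
      switch (task.operation()) {
        case INSERT:
          // emit added rows, tagged with task.changeOrdinal()
          // and task.commitSnapshotId()
          break;
        case DELETE:
          // emit deleted rows with the same metadata
          break;
      }
    }
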
diff --git a/api/src/main/java/org/apache/iceberg/CombinedScanTask.java b/api/src/main/java/org/apache/iceberg/CombinedScanTask.java
index 956fc333d7..3d0ea33a9e 100644
--- a/api/src/main/java/org/apache/iceberg/CombinedScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/CombinedScanTask.java
@@ -16,17 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Collection;
 
-/**
- * A scan task made of several ranges from files.
- */
+/** A scan task made of several ranges from files. */
 public interface CombinedScanTask extends ScanTaskGroup<FileScanTask> {
   /**
    * Return the {@link FileScanTask tasks} in this combined task.
+   *
    * @return a Collection of FileScanTask instances.
    */
   Collection<FileScanTask> files();
diff --git a/api/src/main/java/org/apache/iceberg/ContentFile.java b/api/src/main/java/org/apache/iceberg/ContentFile.java
index 1925ec0d0d..d214ee6eb5 100644
--- a/api/src/main/java/org/apache/iceberg/ContentFile.java
+++ b/api/src/main/java/org/apache/iceberg/ContentFile.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.nio.ByteBuffer;
@@ -30,13 +29,12 @@ import java.util.Map;
  */
 public interface ContentFile<F> {
   /**
-   * Returns the ordinal position of the file in a manifest, or null if it was not read from a manifest.
+   * Returns the ordinal position of the file in a manifest, or null if it was not read from a
+   * manifest.
    */
   Long pos();
 
-  /**
-   * Returns id of the partition spec used for partition metadata.
-   */
+  /** Returns the ID of the partition spec used for partition metadata. */
   int specId();
 
   /**
@@ -44,29 +42,19 @@ public interface ContentFile<F> {
    */
   FileContent content();
 
-  /**
-   * Returns fully qualified path to the file, suitable for constructing a Hadoop Path.
-   */
+  /** Returns fully qualified path to the file, suitable for constructing a Hadoop Path. */
   CharSequence path();
 
-  /**
-   * Returns format of the file.
-   */
+  /** Returns format of the file. */
   FileFormat format();
 
-  /**
-   * Returns partition for this file as a {@link StructLike}.
-   */
+  /** Returns partition for this file as a {@link StructLike}. */
   StructLike partition();
 
-  /**
-   * Returns the number of top-level records in the file.
-   */
+  /** Returns the number of top-level records in the file. */
   long recordCount();
 
-  /**
-   * Returns the file size in bytes.
-   */
+  /** Returns the file size in bytes. */
   long fileSizeInBytes();
 
   /**
@@ -79,24 +67,16 @@ public interface ContentFile<F> {
    */
   Map<Integer, Long> valueCounts();
 
-  /**
-   * Returns if collected, map from column ID to its null value count, null otherwise.
-   */
+  /** Returns a map from column ID to its null value count, or null if not collected. */
   Map<Integer, Long> nullValueCounts();
 
-  /**
-   * Returns if collected, map from column ID to its NaN value count, null otherwise.
-   */
+  /** Returns a map from column ID to its NaN value count, or null if not collected. */
   Map<Integer, Long> nanValueCounts();
 
-  /**
-   * Returns if collected, map from column ID to value lower bounds, null otherwise.
-   */
+  /** Returns a map from column ID to value lower bounds, or null if not collected. */
   Map<Integer, ByteBuffer> lowerBounds();
 
-  /**
-   * Returns if collected, map from column ID to value upper bounds, null otherwise.
-   */
+  /** Returns a map from column ID to value upper bounds, or null if not collected. */
   Map<Integer, ByteBuffer> upperBounds();
 
   /**
@@ -106,53 +86,53 @@ public interface ContentFile<F> {
 
   /**
    * Returns list of recommended split locations, if applicable, null otherwise.
-   * <p>
-   * When available, this information is used for planning scan tasks whose boundaries
-   * are determined by these offsets. The returned list must be sorted in ascending order.
+   *
+   * <p>When available, this information is used for planning scan tasks whose boundaries are
+   * determined by these offsets. The returned list must be sorted in ascending order.
    */
   List<Long> splitOffsets();
 
   /**
    * Returns the set of field IDs used for equality comparison, in equality delete files.
-   * <p>
-   * An equality delete file may contain additional data fields that are not used by equality
+   *
+   * <p>An equality delete file may contain additional data fields that are not used by equality
   * comparison. The subset of columns in a delete file to be used in equality comparison is
-   * tracked by ID. Extra columns can be used to reconstruct changes and metrics from extra
-   * columns are used during job planning.
+   * tracked by ID. Extra columns can be used to reconstruct changes, and metrics from extra
+   * columns are used during job planning.
    *
    * @return IDs of the fields used in equality comparison with the records in this delete file
    */
   List<Integer> equalityFieldIds();
 
   /**
-   * Returns the sort order id of this file, which describes how the file is ordered.
-   * This information will be useful for merging data and equality delete files more efficiently
-   * when they share the same sort order id.
+   * Returns the sort order id of this file, which describes how the file is ordered. This
+   * information will be useful for merging data and equality delete files more efficiently when
+   * they share the same sort order id.
    */
   default Integer sortOrderId() {
     return null;
   }
 
   /**
-   * Copies this file. Manifest readers can reuse file instances; use
-   * this method to copy data when collecting files from tasks.
+   * Copies this file. Manifest readers can reuse file instances; use this method to copy data when
+   * collecting files from tasks.
    *
    * @return a copy of this data file
    */
   F copy();
 
   /**
-   * Copies this file without file stats. Manifest readers can reuse file instances; use
-   * this method to copy data without stats when collecting files.
+   * Copies this file without file stats. Manifest readers can reuse file instances; use this method
+   * to copy data without stats when collecting files.
    *
-   * @return a copy of this data file, without lower bounds, upper bounds, value counts,
-   *         null value counts, or nan value counts
+   * @return a copy of this data file, without lower bounds, upper bounds, value counts, null value
+   *     counts, or nan value counts
    */
   F copyWithoutStats();
 
   /**
-   * Copies this file (potentially without file stats). Manifest readers can reuse file instances; use
-   * this method to copy data when collecting files from tasks.
+   * Copies this file (potentially without file stats). Manifest readers can reuse file instances;
+   * use this method to copy data when collecting files from tasks.
    *
    * @param withStats Will copy this file without file stats if set to <code>false</code>.
    * @return a copy of this data file. If <code>withStats</code> is set to <code>false</code> the
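
To illustrate the copy contract above (a sketch; `tasks` is an assumed
iterable of FileScanTask): manifest readers may reuse ContentFile instances,
so files collected from tasks should be copied before being retained:

    List<DataFile> collected = new ArrayList<>();  // java.util.ArrayList
    for (FileScanTask task : tasks) {
      collected.add(task.file().copy());  // deep copy, keeping stats
      // or task.file().copyWithoutStats() when bounds/counts are not needed
    }
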
diff --git a/api/src/main/java/org/apache/iceberg/ContentScanTask.java b/api/src/main/java/org/apache/iceberg/ContentScanTask.java
index 0077c4c781..1afaf7f1d5 100644
--- a/api/src/main/java/org/apache/iceberg/ContentScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/ContentScanTask.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.expressions.Expression;
@@ -57,9 +56,9 @@ public interface ContentScanTask<F extends ContentFile<F>> extends ScanTask {
 
   /**
    * Returns the residual expression that should be applied to rows in this file scan.
-   * <p>
-   * The residual expression for a file is a filter expression created by partially evaluating the scan's filter
-   * using the file's partition data.
+   *
+   * <p>The residual expression for a file is a filter expression created by partially evaluating
+   * the scan's filter using the file's partition data.
    *
    * @return a residual expression to apply to rows from this scan
    */
diff --git a/api/src/main/java/org/apache/iceberg/DataFile.java b/api/src/main/java/org/apache/iceberg/DataFile.java
index 3d75052924..59b329c500 100644
--- a/api/src/main/java/org/apache/iceberg/DataFile.java
+++ b/api/src/main/java/org/apache/iceberg/DataFile.java
@@ -16,9 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
+import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.apache.iceberg.types.Types.NestedField.required;
+
 import java.util.List;
 import org.apache.iceberg.types.Types;
 import org.apache.iceberg.types.Types.BinaryType;
@@ -29,39 +31,72 @@ import org.apache.iceberg.types.Types.MapType;
 import org.apache.iceberg.types.Types.StringType;
 import org.apache.iceberg.types.Types.StructType;
 
-import static org.apache.iceberg.types.Types.NestedField.optional;
-import static org.apache.iceberg.types.Types.NestedField.required;
-
-/**
- * Interface for data files listed in a table manifest.
- */
+/** Interface for data files listed in a table manifest. */
 public interface DataFile extends ContentFile<DataFile> {
   // fields for adding delete data files
-  Types.NestedField CONTENT = optional(134, "content", IntegerType.get(),
-      "Contents of the file: 0=data, 1=position deletes, 2=equality deletes");
-  Types.NestedField FILE_PATH = required(100, "file_path", StringType.get(), "Location URI with FS scheme");
-  Types.NestedField FILE_FORMAT = required(101, "file_format", StringType.get(),
-      "File format name: avro, orc, or parquet");
-  Types.NestedField RECORD_COUNT = required(103, "record_count", LongType.get(), "Number of records in the file");
-  Types.NestedField FILE_SIZE = required(104, "file_size_in_bytes", LongType.get(), "Total file size in bytes");
-  Types.NestedField COLUMN_SIZES = optional(108, "column_sizes", MapType.ofRequired(117, 118,
-      IntegerType.get(), LongType.get()), "Map of column id to total size on disk");
-  Types.NestedField VALUE_COUNTS = optional(109, "value_counts", MapType.ofRequired(119, 120,
-      IntegerType.get(), LongType.get()), "Map of column id to total count, including null and NaN");
-  Types.NestedField NULL_VALUE_COUNTS = optional(110, "null_value_counts", MapType.ofRequired(121, 122,
-      IntegerType.get(), LongType.get()), "Map of column id to null value count");
-  Types.NestedField NAN_VALUE_COUNTS = optional(137, "nan_value_counts", MapType.ofRequired(138, 139,
-      IntegerType.get(), LongType.get()), "Map of column id to number of NaN values in the column");
-  Types.NestedField LOWER_BOUNDS = optional(125, "lower_bounds", MapType.ofRequired(126, 127,
-      IntegerType.get(), BinaryType.get()), "Map of column id to lower bound");
-  Types.NestedField UPPER_BOUNDS = optional(128, "upper_bounds", MapType.ofRequired(129, 130,
-      IntegerType.get(), BinaryType.get()), "Map of column id to upper bound");
-  Types.NestedField KEY_METADATA = optional(131, "key_metadata", BinaryType.get(), "Encryption key metadata blob");
-  Types.NestedField SPLIT_OFFSETS = optional(132, "split_offsets", ListType.ofRequired(133, LongType.get()),
-      "Splittable offsets");
-  Types.NestedField EQUALITY_IDS = optional(135, "equality_ids", ListType.ofRequired(136, IntegerType.get()),
-      "Equality comparison field IDs");
-  Types.NestedField SORT_ORDER_ID = optional(140, "sort_order_id", IntegerType.get(), "Sort order ID");
+  Types.NestedField CONTENT =
+      optional(
+          134,
+          "content",
+          IntegerType.get(),
+          "Contents of the file: 0=data, 1=position deletes, 2=equality deletes");
+  Types.NestedField FILE_PATH =
+      required(100, "file_path", StringType.get(), "Location URI with FS scheme");
+  Types.NestedField FILE_FORMAT =
+      required(101, "file_format", StringType.get(), "File format name: avro, orc, or parquet");
+  Types.NestedField RECORD_COUNT =
+      required(103, "record_count", LongType.get(), "Number of records in the file");
+  Types.NestedField FILE_SIZE =
+      required(104, "file_size_in_bytes", LongType.get(), "Total file size in bytes");
+  Types.NestedField COLUMN_SIZES =
+      optional(
+          108,
+          "column_sizes",
+          MapType.ofRequired(117, 118, IntegerType.get(), LongType.get()),
+          "Map of column id to total size on disk");
+  Types.NestedField VALUE_COUNTS =
+      optional(
+          109,
+          "value_counts",
+          MapType.ofRequired(119, 120, IntegerType.get(), LongType.get()),
+          "Map of column id to total count, including null and NaN");
+  Types.NestedField NULL_VALUE_COUNTS =
+      optional(
+          110,
+          "null_value_counts",
+          MapType.ofRequired(121, 122, IntegerType.get(), LongType.get()),
+          "Map of column id to null value count");
+  Types.NestedField NAN_VALUE_COUNTS =
+      optional(
+          137,
+          "nan_value_counts",
+          MapType.ofRequired(138, 139, IntegerType.get(), LongType.get()),
+          "Map of column id to number of NaN values in the column");
+  Types.NestedField LOWER_BOUNDS =
+      optional(
+          125,
+          "lower_bounds",
+          MapType.ofRequired(126, 127, IntegerType.get(), BinaryType.get()),
+          "Map of column id to lower bound");
+  Types.NestedField UPPER_BOUNDS =
+      optional(
+          128,
+          "upper_bounds",
+          MapType.ofRequired(129, 130, IntegerType.get(), BinaryType.get()),
+          "Map of column id to upper bound");
+  Types.NestedField KEY_METADATA =
+      optional(131, "key_metadata", BinaryType.get(), "Encryption key metadata blob");
+  Types.NestedField SPLIT_OFFSETS =
+      optional(
+          132, "split_offsets", ListType.ofRequired(133, LongType.get()), "Splittable offsets");
+  Types.NestedField EQUALITY_IDS =
+      optional(
+          135,
+          "equality_ids",
+          ListType.ofRequired(136, IntegerType.get()),
+          "Equality comparison field IDs");
+  Types.NestedField SORT_ORDER_ID =
+      optional(140, "sort_order_id", IntegerType.get(), "Sort order ID");
   Types.NestedField SPEC_ID = optional(141, "spec_id", IntegerType.get(), "Partition spec ID");
 
   int PARTITION_ID = 102;
@@ -88,13 +123,10 @@ public interface DataFile extends ContentFile<DataFile> {
         KEY_METADATA,
         SPLIT_OFFSETS,
         EQUALITY_IDS,
-        SORT_ORDER_ID
-    );
+        SORT_ORDER_ID);
   }
 
-  /**
-   * @return the content stored in the file; one of DATA, POSITION_DELETES, or EQUALITY_DELETES
-   */
+  /** @return the content stored in the file; one of DATA, POSITION_DELETES, or EQUALITY_DELETES */
   @Override
   default FileContent content() {
     return FileContent.DATA;
diff --git a/api/src/main/java/org/apache/iceberg/DataOperations.java b/api/src/main/java/org/apache/iceberg/DataOperations.java
index 143f16a7b3..6a80b6b712 100644
--- a/api/src/main/java/org/apache/iceberg/DataOperations.java
+++ b/api/src/main/java/org/apache/iceberg/DataOperations.java
@@ -16,45 +16,43 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 /**
  * Data operations that produce snapshots.
- * <p>
- * A snapshot can return the operation that created the snapshot to help other components ignore
+ *
+ * <p>A snapshot can return the operation that created the snapshot to help other components ignore
  * snapshots that are not needed for some tasks. For example, snapshot expiration does not need to
  * clean up deleted files for appends, which have no deleted files.
  */
 public class DataOperations {
-  private DataOperations() {
-  }
+  private DataOperations() {}
 
   /**
    * New data is appended to the table and no data is removed or deleted.
-   * <p>
-   * This operation is implemented by {@link AppendFiles}.
+   *
+   * <p>This operation is implemented by {@link AppendFiles}.
    */
   public static final String APPEND = "append";
 
   /**
    * Files are removed and replaced, without changing the data in the table.
-   * <p>
-   * This operation is implemented by {@link RewriteFiles}.
+   *
+   * <p>This operation is implemented by {@link RewriteFiles}.
    */
   public static final String REPLACE = "replace";
 
   /**
    * New data is added to overwrite existing data.
-   * <p>
-   * This operation is implemented by {@link OverwriteFiles} and {@link ReplacePartitions}.
+   *
+   * <p>This operation is implemented by {@link OverwriteFiles} and {@link ReplacePartitions}.
    */
   public static final String OVERWRITE = "overwrite";
 
   /**
    * Data is deleted from the table and no data is added.
-   * <p>
-   * This operation is implemented by {@link DeleteFiles}.
+   *
+   * <p>This operation is implemented by {@link DeleteFiles}.
    */
   public static final String DELETE = "delete";
 }
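
A sketch of the intended use (assuming an org.apache.iceberg.Snapshot named
`snapshot`): consumers compare Snapshot#operation() against these constants,
for example to skip delete cleanup for appends as the javadoc describes:

    if (DataOperations.APPEND.equals(snapshot.operation())) {
      // append snapshots have no deleted files, so no cleanup is needed
    }
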
diff --git a/api/src/main/java/org/apache/iceberg/DataTask.java b/api/src/main/java/org/apache/iceberg/DataTask.java
index f2a8d2a9d8..8ffca76829 100644
--- a/api/src/main/java/org/apache/iceberg/DataTask.java
+++ b/api/src/main/java/org/apache/iceberg/DataTask.java
@@ -16,14 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.io.CloseableIterable;
 
-/**
- * A task that returns data as {@link StructLike rows} instead of where to read data.
- */
+/** A task that returns data as {@link StructLike rows} instead of where to read data. */
 public interface DataTask extends FileScanTask {
   @Override
   default boolean isDataTask() {
@@ -35,8 +32,6 @@ public interface DataTask extends FileScanTask {
     return this;
   }
 
-  /**
-   * Returns an iterable of {@link StructLike} rows.
-   */
+  /** Returns an iterable of {@link StructLike} rows. */
   CloseableIterable<StructLike> rows();
 }
diff --git a/api/src/main/java/org/apache/iceberg/DeleteFile.java b/api/src/main/java/org/apache/iceberg/DeleteFile.java
index 9adc0fb547..0f8087e6a0 100644
--- a/api/src/main/java/org/apache/iceberg/DeleteFile.java
+++ b/api/src/main/java/org/apache/iceberg/DeleteFile.java
@@ -16,19 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
 
-/**
- * Interface for delete files listed in a table delete manifest.
- */
+/** Interface for delete files listed in a table delete manifest. */
 public interface DeleteFile extends ContentFile<DeleteFile> {
   /**
-   * @return List of recommended split locations, if applicable, null otherwise.
-   * When available, this information is used for planning scan tasks whose boundaries
-   * are determined by these offsets. The returned list must be sorted in ascending order.
+   * @return List of recommended split locations, if applicable, null otherwise. When available,
+   *     this information is used for planning scan tasks whose boundaries are determined by these
+   *     offsets. The returned list must be sorted in ascending order.
    */
   @Override
   default List<Long> splitOffsets() {
diff --git a/api/src/main/java/org/apache/iceberg/DeleteFiles.java b/api/src/main/java/org/apache/iceberg/DeleteFiles.java
index 42a89528ff..74d31a6dad 100644
--- a/api/src/main/java/org/apache/iceberg/DeleteFiles.java
+++ b/api/src/main/java/org/apache/iceberg/DeleteFiles.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.exceptions.ValidationException;
@@ -25,18 +24,18 @@ import org.apache.iceberg.expressions.Projections;
 
 /**
  * API for deleting files from a table.
- * <p>
- * This API accumulates file deletions, produces a new {@link Snapshot} of the table, and commits
+ *
+ * <p>This API accumulates file deletions, produces a new {@link Snapshot} of the table, and commits
  * that snapshot as the current.
- * <p>
- * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
+ *
+ * <p>When committing, these changes will be applied to the latest table snapshot. Commit conflicts
  * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
  */
 public interface DeleteFiles extends SnapshotUpdate<DeleteFiles> {
   /**
    * Delete a file path from the underlying table.
-   * <p>
-   * To remove a file from the table, this path must equal a path in the table's metadata. Paths
+   *
+   * <p>To remove a file from the table, this path must equal a path in the table's metadata. Paths
    * that are different but equivalent will not be removed. For example, file:/path/file.avro is
    * equivalent to file:///path/file.avro, but would not remove the latter path from the table.
    *
@@ -58,15 +57,15 @@ public interface DeleteFiles extends SnapshotUpdate<DeleteFiles> {
 
   /**
    * Delete files that match an {@link Expression} on data rows from the table.
-   * <p>
-   * A file is selected to be deleted by the expression if it could contain any rows that match the
-   * expression (candidate files are selected using an
-   * {@link Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are
-   * deleted if all of the rows in the file must match the expression (the partition data matches
-   * the expression's {@link Projections#strict(PartitionSpec)} strict projection}). This guarantees
+   *
+   * <p>A file is selected to be deleted by the expression if it could contain any rows that match
+   * the expression (candidate files are selected using an {@link
+   * Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are deleted
+   * if all of the rows in the file must match the expression (the partition data matches the
+   * expression's {@link Projections#strict(PartitionSpec) strict projection}). This guarantees
    * that files are deleted if and only if all rows in the file must match the expression.
-   * <p>
-   * Files that may contain some rows that match the expression and some rows that do not will
+   *
+   * <p>Files that may contain some rows that match the expression and some rows that do not will
    * result in a {@link ValidationException}.
    *
    * @param expr an expression on rows in the table
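
Usage sketches for the two deletion styles documented above (illustrative
only; the `table` handle and the path are assumptions):

    // delete by exact metadata path
    table.newDelete()
        .deleteFile("file:/path/file.avro")
        .commit();

    // delete whole files selected by a row filter
    // (org.apache.iceberg.expressions.Expressions)
    table.newDelete()
        .deleteFromRowFilter(Expressions.equal("day", "2022-08-14"))
        .commit();
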
diff --git a/api/src/main/java/org/apache/iceberg/DeletedDataFileScanTask.java b/api/src/main/java/org/apache/iceberg/DeletedDataFileScanTask.java
index 0ef5f0946b..5f744a0221 100644
--- a/api/src/main/java/org/apache/iceberg/DeletedDataFileScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/DeletedDataFileScanTask.java
@@ -16,30 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
 
 /**
  * A scan task for deletes generated by removing a data file from the table.
- * <p>
- * Note that all historical delete files added earlier must be applied while reading the data file.
- * This is required to output only those data records that were live when the data file was removed.
- * <p>
- * Suppose snapshot S1 contains data files F1, F2, F3. Then snapshot S2 adds a position delete file, D1,
- * that deletes records from F2 and snapshot S3 removes F2 entirely. A scan for changes generated by S3
- * should include the following task:
+ *
+ * <p>Note that all historical delete files added earlier must be applied while reading the data
+ * file. This is required to output only those data records that were live when the data file was
+ * removed.
+ *
+ * <p>Suppose snapshot S1 contains data files F1, F2, F3. Then snapshot S2 adds a position delete
+ * file, D1, that deletes records from F2 and snapshot S3 removes F2 entirely. A scan for changes
+ * generated by S3 should include the following task:
+ *
  * <ul>
- *   <li>DeletedDataFileScanTask(file=F2, existing-deletes=[D1], snapshot=S3)</li>
+ *   <li>DeletedDataFileScanTask(file=F2, existing-deletes=[D1], snapshot=S3)
  * </ul>
- * <p>
- * Readers consuming these tasks should produce deleted records with metadata like change ordinal and
- * commit snapshot ID.
+ *
+ * <p>Readers consuming these tasks should produce deleted records with metadata like change ordinal
+ * and commit snapshot ID.
  */
 public interface DeletedDataFileScanTask extends ChangelogScanTask, ContentScanTask<DataFile> {
   /**
-   * A list of previously added {@link DeleteFile delete files} to apply when reading the data file in this task.
+   * A list of previously added {@link DeleteFile delete files} to apply when reading the data file
+   * in this task.
    *
    * @return a list of delete files to apply
    */
diff --git a/api/src/main/java/org/apache/iceberg/DeletedRowsScanTask.java b/api/src/main/java/org/apache/iceberg/DeletedRowsScanTask.java
index cd8ebfac8b..94a28f53cc 100644
--- a/api/src/main/java/org/apache/iceberg/DeletedRowsScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/DeletedRowsScanTask.java
@@ -16,31 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
 
 /**
  * A scan task for deletes generated by adding delete files to the table.
- * <p>
- * Suppose snapshot S1 contains data files F1, F2, F3. Then snapshot S2 adds a position delete file, D1,
- * that deletes records from F2 and snapshot S3 adds an equality delete file, D2, that removes records
- * from F1, F2, F3. A scan for changes from S2 to S3 (inclusive) should include the following tasks:
+ *
+ * <p>Suppose snapshot S1 contains data files F1, F2, F3. Then snapshot S2 adds a position delete
+ * file, D1, that deletes records from F2 and snapshot S3 adds an equality delete file, D2, that
+ * removes records from F1, F2, F3. A scan for changes from S2 to S3 (inclusive) should include the
+ * following tasks:
+ *
  * <ul>
- *   <li>DeletedRowsScanTask(file=F2, added-deletes=[D1], existing-deletes=[], snapshot=S2)</li>
- *   <li>DeletedRowsScanTask(file=F1, added-deletes=[D2], existing-deletes=[], snapshot=S3)</li>
- *   <li>DeletedRowsScanTask(file=F2, added-deletes=[D2], existing-deletes=[D1], snapshot=S3)</li>
- *   <li>DeletedRowsScanTask(file=F3, added-deletes=[D2], existing-deletes=[], snapshot=S3)</li>
+ *   <li>DeletedRowsScanTask(file=F2, added-deletes=[D1], existing-deletes=[], snapshot=S2)
+ *   <li>DeletedRowsScanTask(file=F1, added-deletes=[D2], existing-deletes=[], snapshot=S3)
+ *   <li>DeletedRowsScanTask(file=F2, added-deletes=[D2], existing-deletes=[D1], snapshot=S3)
+ *   <li>DeletedRowsScanTask(file=F3, added-deletes=[D2], existing-deletes=[], snapshot=S3)
  * </ul>
- * <p>
- * Readers consuming these tasks should produce deleted records with metadata like change ordinal and
- * commit snapshot ID.
+ *
+ * <p>Readers consuming these tasks should produce deleted records with metadata like change ordinal
+ * and commit snapshot ID.
  */
 public interface DeletedRowsScanTask extends ChangelogScanTask, ContentScanTask<DataFile> {
   /**
-   * A list of added {@link DeleteFile delete files} that apply to the task's data file.
-   * Records removed by these delete files should appear as deletes in the changelog.
+   * A list of added {@link DeleteFile delete files} that apply to the task's data file. Records
+   * removed by these delete files should appear as deletes in the changelog.
    *
    * @return a list of added delete files
    */
@@ -48,8 +49,8 @@ public interface DeletedRowsScanTask extends ChangelogScanTask, ContentScanTask<
 
   /**
    * A list of {@link DeleteFile delete files} that existed before and must be applied prior to
-   * determining which records are deleted by delete files in {@link #addedDeletes()}.
-   * Records removed by these delete files should not appear in the changelog.
+   * determining which records are deleted by delete files in {@link #addedDeletes()}. Records
+   * removed by these delete files should not appear in the changelog.
    *
    * @return a list of existing delete files
    */
diff --git a/api/src/main/java/org/apache/iceberg/DistributionMode.java b/api/src/main/java/org/apache/iceberg/DistributionMode.java
index fbe6c6a558..b4f2649ce1 100644
--- a/api/src/main/java/org/apache/iceberg/DistributionMode.java
+++ b/api/src/main/java/org/apache/iceberg/DistributionMode.java
@@ -16,27 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Locale;
 import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
 
 /**
- * Enum of supported write distribution mode, it defines the write behavior of batch or streaming job:
- * <p>
- * 1. none: don't shuffle rows. It is suitable for scenarios where the rows are located in only few
- * partitions, otherwise that may produce too many small files because each task is writing rows into different
- * partitions randomly.
- * <p>
- * 2. hash: hash distribute by partition key, which is suitable for the scenarios where the rows are located
- * into different partitions evenly.
- * <p>
- * 3. range: range distribute by partition key (or sort key if table has an {@link SortOrder}), which is suitable
- * for the scenarios where rows are located into different partitions with skew distribution.
+ * Enum of supported write distribution modes; it defines the write behavior of a batch or
+ * streaming job:
+ *
+ * <p>1. none: don't shuffle rows. This is suitable for scenarios where the rows are located in
+ * only a few partitions; otherwise it may produce too many small files, because each task writes
+ * rows into different partitions randomly.
+ *
+ * <p>2. hash: hash distribute by partition key, which is suitable for scenarios where the rows
+ * are distributed evenly across partitions.
+ *
+ * <p>3. range: range distribute by partition key (or sort key if the table has a {@link
+ * SortOrder}), which is suitable for scenarios where rows are distributed across partitions with
+ * skew.
  */
 public enum DistributionMode {
-  NONE("none"), HASH("hash"), RANGE("range");
+  NONE("none"),
+  HASH("hash"),
+  RANGE("range");
 
   private final String modeName;
 
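
A small sketch of round-tripping a configured mode name (assuming the enum's
fromName/modeName helpers, which are outside this hunk):

    DistributionMode mode = DistributionMode.fromName("hash");  // HASH
    String name = mode.modeName();                              // "hash"
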
diff --git a/api/src/main/java/org/apache/iceberg/ExpireSnapshots.java b/api/src/main/java/org/apache/iceberg/ExpireSnapshots.java
index 908b79ca83..f6524a1d4f 100644
--- a/api/src/main/java/org/apache/iceberg/ExpireSnapshots.java
+++ b/api/src/main/java/org/apache/iceberg/ExpireSnapshots.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
@@ -25,18 +24,18 @@ import java.util.function.Consumer;
 
 /**
  * API for removing old {@link Snapshot snapshots} from a table.
- * <p>
- * This API accumulates snapshot deletions and commits the new list to the table. This API does not
- * allow deleting the current snapshot.
- * <p>
- * When committing, these changes will be applied to the latest table metadata. Commit conflicts
+ *
+ * <p>This API accumulates snapshot deletions and commits the new list to the table. This API does
+ * not allow deleting the current snapshot.
+ *
+ * <p>When committing, these changes will be applied to the latest table metadata. Commit conflicts
  * will be resolved by applying the changes to the new latest metadata and reattempting the commit.
- * <p>
- * Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
- * deleted by snapshots that are expired will be deleted. {@link #deleteWith(Consumer)} can be used
- * to pass an alternative deletion method.
  *
- * {@link #apply()} returns a list of the snapshots that will be removed.
+ * <p>Manifest files that are no longer used by valid snapshots will be deleted. Data files that
+ * were deleted by snapshots that are expired will be deleted. {@link #deleteWith(Consumer)} can be
+ * used to pass an alternative deletion method.
+ *
+ * <p>{@link #apply()} returns a list of the snapshots that will be removed.
  */
 public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
 
@@ -58,13 +57,14 @@ public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
 
   /**
    * Retains the most recent ancestors of the current snapshot.
-   * <p>
-   * If a snapshot would be expired because it is older than the expiration timestamp, but is one of
-   * the {@code numSnapshots} most recent ancestors of the current state, it will be retained. This
-   * will not cause snapshots explicitly identified by id from expiring.
-   * <p>
-   * This may keep more than {@code numSnapshots} ancestors if snapshots are added concurrently. This
-   * may keep less than {@code numSnapshots} ancestors if the current table state does not have that many.
+   *
+   * <p>If a snapshot would be expired because it is older than the expiration timestamp, but is one
+   * of the {@code numSnapshots} most recent ancestors of the current state, it will be retained.
+   * This will not prevent snapshots explicitly identified by id from expiring.
+   *
+   * <p>This may keep more than {@code numSnapshots} ancestors if snapshots are added concurrently.
+   * This may keep fewer than {@code numSnapshots} ancestors if the current table state does not
+   * that many.
    *
    * @param numSnapshots the number of snapshots to retain
    * @return this for method chaining
@@ -73,11 +73,11 @@ public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
 
   /**
    * Passes an alternative delete implementation that will be used for manifests and data files.
-   * <p>
-   * Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
-   * deleted by snapshots that are expired will be deleted.
-   * <p>
-   * If this method is not called, unnecessary manifests and data files will still be deleted.
+   *
+   * <p>Manifest files that are no longer used by valid snapshots will be deleted. Data files that
+   * were deleted by snapshots that are expired will be deleted.
+   *
+   * <p>If this method is not called, unnecessary manifests and data files will still be deleted.
    *
    * @param deleteFunc a function that will be called to delete manifests and data files
    * @return this for method chaining
@@ -86,21 +86,22 @@ public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
 
   /**
    * Passes an alternative executor service that will be used for manifests and data files deletion.
-   * <p>
-   * Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
-   * deleted by snapshots that are expired will be deleted.
-   * <p>
-   * If this method is not called, unnecessary manifests and data files will still be deleted using a single threaded
-   * executor service.
    *
-   * @param executorService an executor service to parallelize tasks to delete manifests and data files
+   * <p>Manifest files that are no longer used by valid snapshots will be deleted. Data files that
+   * were deleted by snapshots that are expired will be deleted.
+   *
+   * <p>If this method is not called, unnecessary manifests and data files will still be deleted
+   * using a single-threaded executor service.
+   *
+   * @param executorService an executor service to parallelize tasks to delete manifests and data
+   *     files
    * @return this for method chaining
    */
   ExpireSnapshots executeDeleteWith(ExecutorService executorService);
 
   /**
-   * Passes an alternative executor service that will be used for planning.
-   * If this method is not called, the default worker pool will be used.
+   * Passes an alternative executor service that will be used for planning. If this method is not
+   * called, the default worker pool will be used.
    *
    * @param executorService an executor service to plan
    * @return this for method chaining
@@ -109,9 +110,9 @@ public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
 
   /**
    * Allows expiration of snapshots without any cleanup of underlying manifest or data files.
-   * <p>
-   * Allows control in removing data and manifest files which may be more efficiently removed using
-   * a distributed framework through the actions API.
+   *
+   * <p>Allows control in removing data and manifest files which may be more efficiently removed
+   * using a distributed framework through the actions API.
    *
    * @param clean setting this to false will skip deleting expired manifests and files
    * @return this for method chaining
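
A minimal sketch tying the methods above together (assumes a `table` handle;
the retention numbers are arbitrary):

    // java.util.concurrent.TimeUnit
    long cutoff = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7);
    table.expireSnapshots()
        .expireOlderThan(cutoff)   // expire by timestamp
        .retainLast(10)            // but keep recent ancestors of the current snapshot
        .deleteWith(path -> System.out.println("deleted " + path))  // optional callback
        .commit();
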
diff --git a/api/src/main/java/org/apache/iceberg/FileContent.java b/api/src/main/java/org/apache/iceberg/FileContent.java
index 67bfca79a5..2c9a2fa51b 100644
--- a/api/src/main/java/org/apache/iceberg/FileContent.java
+++ b/api/src/main/java/org/apache/iceberg/FileContent.java
@@ -16,12 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
-/**
- * Content type stored in a file, one of DATA, POSITION_DELETES, or EQUALITY_DELETES.
- */
+/** Content type stored in a file, one of DATA, POSITION_DELETES, or EQUALITY_DELETES. */
 public enum FileContent {
   DATA(0),
   POSITION_DELETES(1),
diff --git a/api/src/main/java/org/apache/iceberg/FileFormat.java b/api/src/main/java/org/apache/iceberg/FileFormat.java
index 6bcab8e81b..e00be8ca4e 100644
--- a/api/src/main/java/org/apache/iceberg/FileFormat.java
+++ b/api/src/main/java/org/apache/iceberg/FileFormat.java
@@ -16,14 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.types.Comparators;
 
-/**
- * Enum of supported file formats.
- */
+/** Enum of supported file formats. */
 public enum FileFormat {
   ORC("orc", true),
   PARQUET("parquet", true),
@@ -58,7 +55,9 @@ public enum FileFormat {
   public static FileFormat fromFileName(CharSequence filename) {
     for (FileFormat format : FileFormat.values()) {
       int extStart = filename.length() - format.ext.length();
-      if (Comparators.charSequences().compare(format.ext, filename.subSequence(extStart, filename.length())) == 0) {
+      if (Comparators.charSequences()
+              .compare(format.ext, filename.subSequence(extStart, filename.length()))
+          == 0) {
         return format;
       }
     }
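
For example, fromFileName above resolves a format by file extension (the file
name is arbitrary):

    FileFormat format = FileFormat.fromFileName("part-00000.parquet");
    // format == FileFormat.PARQUET; the method falls back to null
    // when no known extension matches
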
diff --git a/api/src/main/java/org/apache/iceberg/FileScanTask.java b/api/src/main/java/org/apache/iceberg/FileScanTask.java
index 5cc91747a7..d99d924370 100644
--- a/api/src/main/java/org/apache/iceberg/FileScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/FileScanTask.java
@@ -16,14 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
 
-/**
- * A scan task over a range of bytes in a single data file.
- */
+/** A scan task over a range of bytes in a single data file. */
 public interface FileScanTask extends ContentScanTask<DataFile>, SplittableScanTask<FileScanTask> {
   /**
    * A list of {@link DeleteFile delete files} to apply when reading the task's data file.
diff --git a/api/src/main/java/org/apache/iceberg/Files.java b/api/src/main/java/org/apache/iceberg/Files.java
index 705a3e3c93..16d3b663ad 100644
--- a/api/src/main/java/org/apache/iceberg/Files.java
+++ b/api/src/main/java/org/apache/iceberg/Files.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.File;
@@ -34,8 +33,7 @@ import org.apache.iceberg.io.SeekableInputStream;
 
 public class Files {
 
-  private Files() {
-  }
+  private Files() {}
 
   public static OutputFile localOutput(File file) {
     return new LocalOutputFile(file);
@@ -60,8 +58,7 @@ public class Files {
 
       if (!file.getParentFile().isDirectory() && !file.getParentFile().mkdirs()) {
         throw new RuntimeIOException(
-                "Failed to create the file's directory at %s.",
-                file.getParentFile().getAbsolutePath());
+            "Failed to create the file's directory at %s.", file.getParentFile().getAbsolutePath());
       }
 
       try {
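
A usage sketch for these local-file helpers (the path is arbitrary; note the
class is org.apache.iceberg.Files, not java.nio.file.Files):

    // org.apache.iceberg.io.OutputFile, org.apache.iceberg.io.InputFile
    OutputFile out = Files.localOutput(new File("/tmp/example.avro"));
    InputFile in = Files.localInput(new File("/tmp/example.avro"));
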
diff --git a/api/src/main/java/org/apache/iceberg/HistoryEntry.java b/api/src/main/java/org/apache/iceberg/HistoryEntry.java
index 49b12b5dd7..e61f45dc10 100644
--- a/api/src/main/java/org/apache/iceberg/HistoryEntry.java
+++ b/api/src/main/java/org/apache/iceberg/HistoryEntry.java
@@ -16,25 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
 
 /**
  * Table history entry.
- * <p>
- * An entry contains a change to the table state. At the given timestamp, the current snapshot was
- * set to the given snapshot ID.
+ *
+ * <p>An entry contains a change to the table state. At the given timestamp, the current snapshot
+ * was set to the given snapshot ID.
  */
 public interface HistoryEntry extends Serializable {
-  /**
-   * Returns the timestamp in milliseconds of the change.
-   */
+  /** Returns the timestamp in milliseconds of the change. */
   long timestampMillis();
 
-  /**
-   * Returns ID of the new current snapshot.
-   */
+  /** Returns ID of the new current snapshot. */
   long snapshotId();
 }
diff --git a/api/src/main/java/org/apache/iceberg/IcebergBuild.java b/api/src/main/java/org/apache/iceberg/IcebergBuild.java
index 183c7c115b..e72a3c7a82 100644
--- a/api/src/main/java/org/apache/iceberg/IcebergBuild.java
+++ b/api/src/main/java/org/apache/iceberg/IcebergBuild.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.IOException;
@@ -29,12 +28,9 @@ import org.apache.iceberg.relocated.com.google.common.io.Resources;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * Loads iceberg-version.properties with build information.
- */
+/** Loads iceberg-version.properties with build information. */
 public class IcebergBuild {
-  private IcebergBuild() {
-  }
+  private IcebergBuild() {}
 
   private static final Logger LOG = LoggerFactory.getLogger(IcebergBuild.class);
   private static final String VERSION_PROPERTIES_FILE = "/iceberg-build.properties";
@@ -42,16 +38,14 @@ public class IcebergBuild {
 
   private static volatile boolean isLoaded = false;
 
-  private static String shortId;  // 10 character short git hash of the build
-  private static String commitId;  // 40 character full git hash of the build
+  private static String shortId; // 10 character short git hash of the build
+  private static String commitId; // 40 character full git hash of the build
   private static String branch;
   private static List<String> tags;
   private static String version;
   private static String fullVersion;
 
-  /**
-   * Loads the version.properties file for this module.
-   */
+  /** Loads the version.properties file for this module. */
   public static void loadBuildInfo() {
     Properties buildProperties = new Properties();
     try (InputStream is = readResource(VERSION_PROPERTIES_FILE)) {
@@ -115,6 +109,7 @@ public class IcebergBuild {
   }
 
   private static InputStream readResource(String resourceName) throws IOException {
-    return Resources.asByteSource(Resources.getResource(IcebergBuild.class, resourceName)).openStream();
+    return Resources.asByteSource(Resources.getResource(IcebergBuild.class, resourceName))
+        .openStream();
   }
 }
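
A sketch of reading the loaded build info at runtime (accessor names are
assumed from this class's public API, which lies outside the hunk):

    String version = IcebergBuild.version();    // e.g. "1.0.0"
    String full = IcebergBuild.fullVersion();   // version plus short git hash
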
diff --git a/api/src/main/java/org/apache/iceberg/IncrementalAppendScan.java b/api/src/main/java/org/apache/iceberg/IncrementalAppendScan.java
index 24b9ed7ccb..20a0c940b8 100644
--- a/api/src/main/java/org/apache/iceberg/IncrementalAppendScan.java
+++ b/api/src/main/java/org/apache/iceberg/IncrementalAppendScan.java
@@ -16,12 +16,8 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-
 package org.apache.iceberg;
 
-/**
- * API for configuring an incremental table scan for appends only snapshots
- */
-public interface IncrementalAppendScan extends IncrementalScan<IncrementalAppendScan, FileScanTask, CombinedScanTask> {
-}
+/** API for configuring an incremental table scan over append-only snapshots. */
+public interface IncrementalAppendScan
+    extends IncrementalScan<IncrementalAppendScan, FileScanTask, CombinedScanTask> {}
diff --git a/api/src/main/java/org/apache/iceberg/IncrementalChangelogScan.java b/api/src/main/java/org/apache/iceberg/IncrementalChangelogScan.java
index 169695058d..de1c80ac79 100644
--- a/api/src/main/java/org/apache/iceberg/IncrementalChangelogScan.java
+++ b/api/src/main/java/org/apache/iceberg/IncrementalChangelogScan.java
@@ -16,12 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
-/**
- * API for configuring a scan for table changes.
- */
+/** API for configuring a scan for table changes. */
 public interface IncrementalChangelogScan
-    extends IncrementalScan<IncrementalChangelogScan, ChangelogScanTask, ScanTaskGroup<ChangelogScanTask>> {
-}
+    extends IncrementalScan<
+        IncrementalChangelogScan, ChangelogScanTask, ScanTaskGroup<ChangelogScanTask>> {}
diff --git a/api/src/main/java/org/apache/iceberg/IncrementalScan.java b/api/src/main/java/org/apache/iceberg/IncrementalScan.java
index 501c11bc11..1f7a8dff66 100644
--- a/api/src/main/java/org/apache/iceberg/IncrementalScan.java
+++ b/api/src/main/java/org/apache/iceberg/IncrementalScan.java
@@ -16,18 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
-/**
- * API for configuring an incremental scan.
- */
-public interface IncrementalScan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> extends Scan<ThisT, T, G> {
+/** API for configuring an incremental scan. */
+public interface IncrementalScan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>>
+    extends Scan<ThisT, T, G> {
   /**
    * Instructs this scan to look for changes starting from a particular snapshot (inclusive).
-   * <p>
-   * If the start snapshot is not configured, it is defaulted to the oldest ancestor
-   * of the end snapshot (inclusive).
+   *
+   * <p>If the start snapshot is not configured, it defaults to the oldest ancestor of the end
+   * snapshot (inclusive).
    *
    * @param fromSnapshotId the start snapshot ID (inclusive)
    * @return this for method chaining
@@ -37,9 +35,9 @@ public interface IncrementalScan<ThisT, T extends ScanTask, G extends ScanTaskGr
 
   /**
    * Instructs this scan to look for changes starting from a particular snapshot (exclusive).
-   * <p>
-   * If the start snapshot is not configured, it is defaulted to the oldest ancestor
-   * of the end snapshot (inclusive).
+   *
+   * <p>If the start snapshot is not configured, it defaults to the oldest ancestor of the end
+   * snapshot (inclusive).
    *
    * @param fromSnapshotId the start snapshot ID (exclusive)
    * @return this for method chaining
@@ -49,8 +47,9 @@ public interface IncrementalScan<ThisT, T extends ScanTask, G extends ScanTaskGr
 
   /**
    * Instructs this scan to look for changes up to a particular snapshot (inclusive).
-   * <p>
-   * If the end snapshot is not configured, it is defaulted to the current table snapshot (inclusive).
+   *
+   * <p>If the end snapshot is not configured, it defaults to the current table snapshot
+   * (inclusive).
    *
    * @param toSnapshotId the end snapshot ID (inclusive)
    * @return this for method chaining
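
A hedged sketch of an incremental append scan over a snapshot range (assumes
a `table` handle exposing newIncrementalAppendScan(), plus two known snapshot
IDs; both are assumptions, not part of this diff):

    // org.apache.iceberg.io.CloseableIterable
    IncrementalAppendScan scan = table.newIncrementalAppendScan()
        .fromSnapshotExclusive(startSnapshotId)
        .toSnapshot(endSnapshotId);
    try (CloseableIterable<FileScanTask> tasks = scan.planFiles()) {
      // read the appended data files from each task
    }
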
diff --git a/api/src/main/java/org/apache/iceberg/LockManager.java b/api/src/main/java/org/apache/iceberg/LockManager.java
index 3019687bce..8fbc1448b4 100644
--- a/api/src/main/java/org/apache/iceberg/LockManager.java
+++ b/api/src/main/java/org/apache/iceberg/LockManager.java
@@ -16,18 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Map;
 
-/**
- * An interface for locking, used to ensure commit isolation.
- */
+/** An interface for locking, used to ensure commit isolation. */
 public interface LockManager extends AutoCloseable {
 
   /**
    * Try to acquire a lock
+   *
    * @param entityId ID of the entity to lock
   * @param ownerId ID of the owner of the lock
    * @return if the lock for the entity is acquired by the owner
@@ -37,7 +35,7 @@ public interface LockManager extends AutoCloseable {
   /**
    * Release a lock
    *
-   * exception must not be thrown for this method.
+   * <p>Exceptions must not be thrown by this method.
    *
    * @param entityId ID of the entity to lock
   * @param ownerId ID of the owner of the lock
@@ -47,6 +45,7 @@ public interface LockManager extends AutoCloseable {
 
   /**
    * Initialize lock manager from catalog properties.
+   *
    * @param properties catalog properties
    */
   void initialize(Map<String, String> properties);
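
The acquire/release contract above, as a sketch (the entity and owner IDs are
arbitrary strings; the `lockManager` instance is assumed):

    if (lockManager.acquire("catalog.db.table", ownerId)) {
      try {
        // perform the commit that needs isolation
      } finally {
        lockManager.release("catalog.db.table", ownerId);
      }
    }
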
diff --git a/api/src/main/java/org/apache/iceberg/ManageSnapshots.java b/api/src/main/java/org/apache/iceberg/ManageSnapshots.java
index b534711d65..81caf3a58d 100644
--- a/api/src/main/java/org/apache/iceberg/ManageSnapshots.java
+++ b/api/src/main/java/org/apache/iceberg/ManageSnapshots.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.exceptions.CommitFailedException;
@@ -24,23 +23,23 @@ import org.apache.iceberg.exceptions.DuplicateWAPCommitException;
 import org.apache.iceberg.exceptions.ValidationException;
 
 /**
- * API for managing snapshots. Allows rolling table data back to a stated at an older table {@link Snapshot snapshot}.
- * Rollback:
- *  <p>
- *  This API does not allow conflicting calls to {@link #setCurrentSnapshot(long)} and
- *  {@link #rollbackToTime(long)}.
- *  <p>
- *  When committing, these changes will be applied to the current table metadata. Commit conflicts
- *  will not be resolved and will result in a {@link CommitFailedException}.
- * Cherrypick:
- *  <p>
- *  In an audit workflow, new data is written to an orphan {@link Snapshot snapshot} that is not committed as
- *  the table's current state until it is audited. After auditing a change, it may need to be applied or cherry-picked
- *  on top of the latest snapshot instead of the one that was current when the audited changes were created.
- *  This class adds support for cherry-picking the changes from an orphan snapshot by applying them to
- *  the current snapshot. The output of the operation is a new snapshot with the changes from cherry-picked
- *  snapshot.
- *  <p>
+ * API for managing snapshots. Allows rolling table data back to the state at an older table
+ * {@link Snapshot snapshot}. Rollback:
+ *
+ * <p>This API does not allow conflicting calls to {@link #setCurrentSnapshot(long)} and {@link
+ * #rollbackToTime(long)}.
+ *
+ * <p>When committing, these changes will be applied to the current table metadata. Commit conflicts
+ * will not be resolved and will result in a {@link CommitFailedException}. Cherrypick:
+ *
+ * <p>In an audit workflow, new data is written to an orphan {@link Snapshot snapshot} that is not
+ * committed as the table's current state until it is audited. After auditing a change, it may need
+ * to be applied or cherry-picked on top of the latest snapshot instead of the one that was current
+ * when the audited changes were created. This class adds support for cherry-picking the changes
+ * from an orphan snapshot by applying them to the current snapshot. The output of the operation is
+ * a new snapshot with the changes from the cherry-picked snapshot.
  */
 public interface ManageSnapshots extends PendingUpdate<Snapshot> {
 
@@ -64,7 +63,9 @@ public interface ManageSnapshots extends PendingUpdate<Snapshot> {
 
   /**
   * Roll back table's state to a specific {@link Snapshot} identified by id.
-   * @param snapshotId long id of snapshot id to roll back table to. Must be an ancestor of the current snapshot
+   *
+   * @param snapshotId long ID of the snapshot to roll back the table to. Must be an ancestor of
+   *     the current snapshot
    * @throws IllegalArgumentException If the table has no snapshot with the given id
    * @throws ValidationException If given snapshot id is not an ancestor of the current state
    */
@@ -73,11 +74,12 @@ public interface ManageSnapshots extends PendingUpdate<Snapshot> {
   /**
    * Apply supported changes in given snapshot and create a new snapshot which will be set as the
    * current snapshot on commit.
+   *
    * @param snapshotId a snapshotId whose changes to apply
    * @return this for method chaining
    * @throws IllegalArgumentException If the table has no snapshot with the given id
-   * @throws DuplicateWAPCommitException In case of a WAP workflow and if the table has a duplicate commit with same
-   * wapId
+   * @throws DuplicateWAPCommitException In case of a WAP workflow and if the table has a duplicate
+   *     commit with the same wapId
    */
   ManageSnapshots cherrypick(long snapshotId);
 
@@ -115,8 +117,8 @@ public interface ManageSnapshots extends PendingUpdate<Snapshot> {
    *
    * @param name name of branch to rename
    * @param newName the desired new name of the branch
-   * @throws IllegalArgumentException if the branch to rename does not exist or if there is already a branch
-   * with the same name as the desired new name.
+   * @throws IllegalArgumentException if the branch to rename does not exist or if there is already
+   *     a branch with the same name as the desired new name.
    */
   ManageSnapshots renameBranch(String name, String newName);
 
@@ -148,8 +150,8 @@ public interface ManageSnapshots extends PendingUpdate<Snapshot> {
   ManageSnapshots replaceBranch(String name, long snapshotId);
 
   /**
-   * Replaces the branch with the given name to point to the source snapshot.
-   * The source branch will remain unchanged, the target branch will retain its retention properties.
+   * Replaces the branch with the given name to point to the source snapshot. The source branch will
+   * remain unchanged, and the target branch will retain its retention properties.
    *
    * @param name Branch to replace
    * @param source Source reference for the target to be replaced with
@@ -158,8 +160,9 @@ public interface ManageSnapshots extends PendingUpdate<Snapshot> {
   ManageSnapshots replaceBranch(String name, String source);
 
   /**
-   * Performs a fast-forward of the given target branch up to the source snapshot if target is an ancestor of source.
-   * The source branch will remain unchanged, the target branch will retain its retention properties.
+   * Performs a fast-forward of the given target branch up to the source snapshot if target is an
+   * ancestor of source. The source branch will remain unchanged, the target branch will retain its
+   * retention properties.
    *
    * @param name Branch to fast-forward
    * @param source Source reference for the target to be fast forwarded to
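To make the rollback and cherry-pick flows above concrete, a hedged sketch (not part of this diff) that assumes a Table named table obtained elsewhere:

    import org.apache.iceberg.Table;

    class SnapshotAuditExample {
      // WAP/audit flow: publish an audited, orphan snapshot by cherry-picking it.
      static void publishAudited(Table table, long stagedSnapshotId) {
        table.manageSnapshots().cherrypick(stagedSnapshotId).commit();
      }

      // Rollback flow: make an existing ancestor snapshot current again.
      static void rollback(Table table, long snapshotId) {
        table.manageSnapshots().setCurrentSnapshot(snapshotId).commit();
      }
    }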
diff --git a/api/src/main/java/org/apache/iceberg/ManifestContent.java b/api/src/main/java/org/apache/iceberg/ManifestContent.java
index 1c32b99156..264fc82565 100644
--- a/api/src/main/java/org/apache/iceberg/ManifestContent.java
+++ b/api/src/main/java/org/apache/iceberg/ManifestContent.java
@@ -16,12 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
-/**
- * Content type stored in a manifest file, either DATA or DELETES.
- */
+/** Content type stored in a manifest file, either DATA or DELETES. */
 public enum ManifestContent {
   DATA(0),
   DELETES(1);
@@ -38,8 +35,10 @@ public enum ManifestContent {
 
   public static ManifestContent fromId(int id) {
     switch (id) {
-      case 0: return DATA;
-      case 1: return DELETES;
+      case 0:
+        return DATA;
+      case 1:
+        return DELETES;
     }
     throw new IllegalArgumentException("Unknown manifest content: " + id);
   }
diff --git a/api/src/main/java/org/apache/iceberg/ManifestFile.java b/api/src/main/java/org/apache/iceberg/ManifestFile.java
index 7eb89f49b5..e5cbfa1700 100644
--- a/api/src/main/java/org/apache/iceberg/ManifestFile.java
+++ b/api/src/main/java/org/apache/iceberg/ManifestFile.java
@@ -16,100 +16,120 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
+import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.apache.iceberg.types.Types.NestedField.required;
+
 import java.nio.ByteBuffer;
 import java.util.List;
 import org.apache.iceberg.types.Types;
 
-import static org.apache.iceberg.types.Types.NestedField.optional;
-import static org.apache.iceberg.types.Types.NestedField.required;
-
-/**
- * Represents a manifest file that can be scanned to find data files in a table.
- */
+/** Represents a manifest file that can be scanned to find data files in a table. */
 public interface ManifestFile {
-  Types.NestedField PATH = required(500, "manifest_path", Types.StringType.get(), "Location URI with FS scheme");
-  Types.NestedField LENGTH = required(501, "manifest_length", Types.LongType.get(), "Total file size in bytes");
-  Types.NestedField SPEC_ID = required(502, "partition_spec_id", Types.IntegerType.get(), "Spec ID used to write");
-  Types.NestedField MANIFEST_CONTENT = optional(517, "content", Types.IntegerType.get(),
-      "Contents of the manifest: 0=data, 1=deletes");
-  Types.NestedField SEQUENCE_NUMBER = optional(515, "sequence_number", Types.LongType.get(),
-      "Sequence number when the manifest was added");
-  Types.NestedField MIN_SEQUENCE_NUMBER = optional(516, "min_sequence_number", Types.LongType.get(),
-      "Lowest sequence number in the manifest");
-  Types.NestedField SNAPSHOT_ID = optional(503, "added_snapshot_id", Types.LongType.get(),
-      "Snapshot ID that added the manifest");
-  Types.NestedField ADDED_FILES_COUNT = optional(504, "added_data_files_count", Types.IntegerType.get(),
-      "Added entry count");
-  Types.NestedField EXISTING_FILES_COUNT = optional(505, "existing_data_files_count", Types.IntegerType.get(),
-      "Existing entry count");
-  Types.NestedField DELETED_FILES_COUNT = optional(506, "deleted_data_files_count", Types.IntegerType.get(),
-      "Deleted entry count");
-  Types.NestedField ADDED_ROWS_COUNT = optional(512, "added_rows_count", Types.LongType.get(),
-      "Added rows count");
-  Types.NestedField EXISTING_ROWS_COUNT = optional(513, "existing_rows_count", Types.LongType.get(),
-      "Existing rows count");
-  Types.NestedField DELETED_ROWS_COUNT = optional(514, "deleted_rows_count", Types.LongType.get(),
-      "Deleted rows count");
-  Types.StructType PARTITION_SUMMARY_TYPE = Types.StructType.of(
-      required(509, "contains_null", Types.BooleanType.get(), "True if any file has a null partition value"),
-      optional(518, "contains_nan", Types.BooleanType.get(), "True if any file has a nan partition value"),
-      optional(510, "lower_bound", Types.BinaryType.get(), "Partition lower bound for all files"),
-      optional(511, "upper_bound", Types.BinaryType.get(), "Partition upper bound for all files")
-  );
-  Types.NestedField PARTITION_SUMMARIES = optional(507, "partitions",
-      Types.ListType.ofRequired(508, PARTITION_SUMMARY_TYPE),
-      "Summary for each partition");
-  Types.NestedField KEY_METADATA = optional(519, "key_metadata", Types.BinaryType.get(),
-      "Encryption key metadata blob");
+  Types.NestedField PATH =
+      required(500, "manifest_path", Types.StringType.get(), "Location URI with FS scheme");
+  Types.NestedField LENGTH =
+      required(501, "manifest_length", Types.LongType.get(), "Total file size in bytes");
+  Types.NestedField SPEC_ID =
+      required(502, "partition_spec_id", Types.IntegerType.get(), "Spec ID used to write");
+  Types.NestedField MANIFEST_CONTENT =
+      optional(
+          517, "content", Types.IntegerType.get(), "Contents of the manifest: 0=data, 1=deletes");
+  Types.NestedField SEQUENCE_NUMBER =
+      optional(
+          515,
+          "sequence_number",
+          Types.LongType.get(),
+          "Sequence number when the manifest was added");
+  Types.NestedField MIN_SEQUENCE_NUMBER =
+      optional(
+          516,
+          "min_sequence_number",
+          Types.LongType.get(),
+          "Lowest sequence number in the manifest");
+  Types.NestedField SNAPSHOT_ID =
+      optional(
+          503, "added_snapshot_id", Types.LongType.get(), "Snapshot ID that added the manifest");
+  Types.NestedField ADDED_FILES_COUNT =
+      optional(504, "added_data_files_count", Types.IntegerType.get(), "Added entry count");
+  Types.NestedField EXISTING_FILES_COUNT =
+      optional(505, "existing_data_files_count", Types.IntegerType.get(), "Existing entry count");
+  Types.NestedField DELETED_FILES_COUNT =
+      optional(506, "deleted_data_files_count", Types.IntegerType.get(), "Deleted entry count");
+  Types.NestedField ADDED_ROWS_COUNT =
+      optional(512, "added_rows_count", Types.LongType.get(), "Added rows count");
+  Types.NestedField EXISTING_ROWS_COUNT =
+      optional(513, "existing_rows_count", Types.LongType.get(), "Existing rows count");
+  Types.NestedField DELETED_ROWS_COUNT =
+      optional(514, "deleted_rows_count", Types.LongType.get(), "Deleted rows count");
+  Types.StructType PARTITION_SUMMARY_TYPE =
+      Types.StructType.of(
+          required(
+              509,
+              "contains_null",
+              Types.BooleanType.get(),
+              "True if any file has a null partition value"),
+          optional(
+              518,
+              "contains_nan",
+              Types.BooleanType.get(),
+              "True if any file has a nan partition value"),
+          optional(
+              510, "lower_bound", Types.BinaryType.get(), "Partition lower bound for all files"),
+          optional(
+              511, "upper_bound", Types.BinaryType.get(), "Partition upper bound for all files"));
+  Types.NestedField PARTITION_SUMMARIES =
+      optional(
+          507,
+          "partitions",
+          Types.ListType.ofRequired(508, PARTITION_SUMMARY_TYPE),
+          "Summary for each partition");
+  Types.NestedField KEY_METADATA =
+      optional(519, "key_metadata", Types.BinaryType.get(), "Encryption key metadata blob");
   // next ID to assign: 520
 
-  Schema SCHEMA = new Schema(
-      PATH, LENGTH, SPEC_ID, MANIFEST_CONTENT,
-      SEQUENCE_NUMBER, MIN_SEQUENCE_NUMBER, SNAPSHOT_ID,
-      ADDED_FILES_COUNT, EXISTING_FILES_COUNT, DELETED_FILES_COUNT,
-      ADDED_ROWS_COUNT, EXISTING_ROWS_COUNT, DELETED_ROWS_COUNT,
-      PARTITION_SUMMARIES, KEY_METADATA);
+  Schema SCHEMA =
+      new Schema(
+          PATH,
+          LENGTH,
+          SPEC_ID,
+          MANIFEST_CONTENT,
+          SEQUENCE_NUMBER,
+          MIN_SEQUENCE_NUMBER,
+          SNAPSHOT_ID,
+          ADDED_FILES_COUNT,
+          EXISTING_FILES_COUNT,
+          DELETED_FILES_COUNT,
+          ADDED_ROWS_COUNT,
+          EXISTING_ROWS_COUNT,
+          DELETED_ROWS_COUNT,
+          PARTITION_SUMMARIES,
+          KEY_METADATA);
 
   static Schema schema() {
     return SCHEMA;
   }
 
-  /**
-   * Returns fully qualified path to the file, suitable for constructing a Hadoop Path.
-   */
+  /** Returns fully qualified path to the file, suitable for constructing a Hadoop Path. */
   String path();
 
-  /**
-   * Returns length of the manifest file.
-   */
+  /** Returns length of the manifest file. */
   long length();
 
-  /**
-   * Returns iD of the {@link PartitionSpec} used to write the manifest file.
-   */
+  /** Returns ID of the {@link PartitionSpec} used to write the manifest file. */
   int partitionSpecId();
 
-  /**
-   * Returns the content stored in the manifest; either DATA or DELETES.
-   */
+  /** Returns the content stored in the manifest; either DATA or DELETES. */
   ManifestContent content();
 
-  /**
-   * Returns the sequence number of the commit that added the manifest file.
-   */
+  /** Returns the sequence number of the commit that added the manifest file. */
   long sequenceNumber();
 
-  /**
-   * Returns the lowest sequence number of any data file in the manifest.
-   */
+  /** Returns the lowest sequence number of any data file in the manifest. */
   long minSequenceNumber();
 
-  /**
-   * Returns iD of the snapshot that added the manifest file to table metadata.
-   */
+  /** Returns ID of the snapshot that added the manifest file to table metadata. */
   Long snapshotId();
 
   /**
@@ -121,14 +141,10 @@ public interface ManifestFile {
     return addedFilesCount() == null || addedFilesCount() > 0;
   }
 
-  /**
-   * Returns the number of data files with status ADDED in the manifest file.
-   */
+  /** Returns the number of data files with status ADDED in the manifest file. */
   Integer addedFilesCount();
 
-  /**
-   * Returns the total number of rows in all data files with status ADDED in the manifest file.
-   */
+  /** Returns the total number of rows in all data files with status ADDED in the manifest file. */
   Long addedRowsCount();
 
   /**
@@ -140,9 +156,7 @@ public interface ManifestFile {
     return existingFilesCount() == null || existingFilesCount() > 0;
   }
 
-  /**
-   * Returns the number of data files with status EXISTING in the manifest file.
-   */
+  /** Returns the number of data files with status EXISTING in the manifest file. */
   Integer existingFilesCount();
 
   /**
@@ -159,9 +173,7 @@ public interface ManifestFile {
     return deletedFilesCount() == null || deletedFilesCount() > 0;
   }
 
-  /**
-   * Returns the number of data files with status DELETED in the manifest file.
-   */
+  /** Returns the number of data files with status DELETED in the manifest file. */
   Integer deletedFilesCount();
 
   /**
@@ -171,18 +183,18 @@ public interface ManifestFile {
 
   /**
    * Returns a list of {@link PartitionFieldSummary partition field summaries}.
-   * <p>
-   * Each summary corresponds to a field in the manifest file's partition spec, by ordinal. For
-   * example, the partition spec [ ts_day=date(ts), type=identity(type) ] will have 2 summaries.
-   * The first summary is for the ts_day partition field and the second is for the type partition
-   * field.
+   *
+   * <p>Each summary corresponds to a field in the manifest file's partition spec, by ordinal. For
+   * example, the partition spec [ ts_day=date(ts), type=identity(type) ] will have 2 summaries. The
+   * first summary is for the ts_day partition field and the second is for the type partition field.
    *
    * @return a list of partition field summaries, one for each field in the manifest's spec
    */
   List<PartitionFieldSummary> partitions();
 
   /**
-   * Returns metadata about how this manifest file is encrypted, or null if the file is stored in plain text.
+   * Returns metadata about how this manifest file is encrypted, or null if the file is stored in
+   * plain text.
    */
   default ByteBuffer keyMetadata() {
     return null;
@@ -196,32 +208,26 @@ public interface ManifestFile {
    */
   ManifestFile copy();
 
-  /**
-   * Summarizes the values of one partition field stored in a manifest file.
-   */
+  /** Summarizes the values of one partition field stored in a manifest file. */
   interface PartitionFieldSummary {
     static Types.StructType getType() {
       return PARTITION_SUMMARY_TYPE;
     }
 
-    /**
-     * Returns true if at least one data file in the manifest has a null value for the field.
-     */
+    /** Returns true if at least one data file in the manifest has a null value for the field. */
     boolean containsNull();
 
     /**
-     * Returns true if at least one data file in the manifest has a NaN value for the field.
-     * Null if this information doesn't exist.
-     * <p>
-     * Default to return null to ensure backward compatibility.
+     * Returns true if at least one data file in the manifest has a NaN value for the field. Null if
+     * this information doesn't exist.
+     *
+     * <p>Defaults to returning null to ensure backward compatibility.
      */
     default Boolean containsNaN() {
       return null;
     }
 
-    /**
-     * Returns a ByteBuffer that contains a serialized bound lower than all values of the field.
-     */
+    /** Returns a ByteBuffer that contains a serialized bound lower than all values of the field. */
     ByteBuffer lowerBound();
 
     /**
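A small sketch (not part of this diff) of consuming the counters declared above; the List<ManifestFile> is assumed to come from a snapshot listing elsewhere:

    import java.util.List;
    import org.apache.iceberg.ManifestContent;
    import org.apache.iceberg.ManifestFile;

    class ManifestStatsExample {
      // Sum rows added by data manifests; the counts may be null for old
      // manifests, matching the nullable Integer/Long accessors above.
      static long addedRows(List<ManifestFile> manifests) {
        long total = 0L;
        for (ManifestFile manifest : manifests) {
          if (manifest.content() == ManifestContent.DATA && manifest.addedRowsCount() != null) {
            total += manifest.addedRowsCount();
          }
        }
        return total;
      }
    }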
diff --git a/api/src/main/java/org/apache/iceberg/MergeableScanTask.java b/api/src/main/java/org/apache/iceberg/MergeableScanTask.java
index e1f30a630f..17aeb775dc 100644
--- a/api/src/main/java/org/apache/iceberg/MergeableScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/MergeableScanTask.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 /**
@@ -35,8 +34,8 @@ public interface MergeableScanTask<ThisT> extends ScanTask {
 
   /**
    * Merges this task with a given task.
-   * <p>
-   * Note this method will be called only if {@link #canMerge(ScanTask)} returns true.
+   *
+   * <p>Note this method will be called only if {@link #canMerge(ScanTask)} returns true.
    *
    * @param other another task
    * @return a new merged task
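One way a planner might apply the canMerge/merge contract above; a simplified sketch (not part of this diff) over an assumed input list:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.iceberg.MergeableScanTask;
    import org.apache.iceberg.ScanTask;

    class MergeExample {
      // Fold each task into its predecessor whenever canMerge() allows it;
      // merge() is only called after canMerge() returns true, per the Javadoc.
      static List<ScanTask> mergeAdjacent(List<? extends ScanTask> tasks) {
        List<ScanTask> merged = new ArrayList<>();
        for (ScanTask task : tasks) {
          int last = merged.size() - 1;
          if (last >= 0 && merged.get(last) instanceof MergeableScanTask) {
            MergeableScanTask<?> candidate = (MergeableScanTask<?>) merged.get(last);
            if (candidate.canMerge(task)) {
              merged.set(last, (ScanTask) candidate.merge(task));
              continue;
            }
          }
          merged.add(task);
        }
        return merged;
      }
    }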
diff --git a/api/src/main/java/org/apache/iceberg/Metrics.java b/api/src/main/java/org/apache/iceberg/Metrics.java
index 30a9a8cc7d..2f2cf89cda 100644
--- a/api/src/main/java/org/apache/iceberg/Metrics.java
+++ b/api/src/main/java/org/apache/iceberg/Metrics.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.IOException;
@@ -28,9 +27,7 @@ import java.util.Map;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.util.ByteBuffers;
 
-/**
- * Iceberg file format metrics.
- */
+/** Iceberg file format metrics. */
 public class Metrics implements Serializable {
 
   private Long rowCount = null;
@@ -41,14 +38,14 @@ public class Metrics implements Serializable {
   private Map<Integer, ByteBuffer> lowerBounds = null;
   private Map<Integer, ByteBuffer> upperBounds = null;
 
-  public Metrics() {
-  }
+  public Metrics() {}
 
-  public Metrics(Long rowCount,
-                 Map<Integer, Long> columnSizes,
-                 Map<Integer, Long> valueCounts,
-                 Map<Integer, Long> nullValueCounts,
-                 Map<Integer, Long> nanValueCounts) {
+  public Metrics(
+      Long rowCount,
+      Map<Integer, Long> columnSizes,
+      Map<Integer, Long> valueCounts,
+      Map<Integer, Long> nullValueCounts,
+      Map<Integer, Long> nanValueCounts) {
     this.rowCount = rowCount;
     this.columnSizes = columnSizes;
     this.valueCounts = valueCounts;
@@ -56,13 +53,14 @@ public class Metrics implements Serializable {
     this.nanValueCounts = nanValueCounts;
   }
 
-  public Metrics(Long rowCount,
-                 Map<Integer, Long> columnSizes,
-                 Map<Integer, Long> valueCounts,
-                 Map<Integer, Long> nullValueCounts,
-                 Map<Integer, Long> nanValueCounts,
-                 Map<Integer, ByteBuffer> lowerBounds,
-                 Map<Integer, ByteBuffer> upperBounds) {
+  public Metrics(
+      Long rowCount,
+      Map<Integer, Long> columnSizes,
+      Map<Integer, Long> valueCounts,
+      Map<Integer, Long> nullValueCounts,
+      Map<Integer, Long> nanValueCounts,
+      Map<Integer, ByteBuffer> lowerBounds,
+      Map<Integer, ByteBuffer> upperBounds) {
     this.rowCount = rowCount;
     this.columnSizes = columnSizes;
     this.valueCounts = valueCounts;
@@ -120,12 +118,12 @@ public class Metrics implements Serializable {
   /**
    * Get the non-null lower bound values for all fields in a file.
    *
-   * To convert the {@link ByteBuffer} back to a value, use
-   * {@link org.apache.iceberg.types.Conversions#fromByteBuffer}.
+   * <p>To convert the {@link ByteBuffer} back to a value, use {@link
+   * org.apache.iceberg.types.Conversions#fromByteBuffer}.
    *
    * @return a Map of fieldId to the lower bound value as a ByteBuffer
-   * @see <a href="https://iceberg.apache.org/spec/#appendix-d-single-value-serialization">
-   *   Iceberg Spec - Appendix D: Single-value serialization</a>
+   * @see <a href="https://iceberg.apache.org/spec/#appendix-d-single-value-serialization">Iceberg
+   *     Spec - Appendix D: Single-value serialization</a>
    */
   public Map<Integer, ByteBuffer> lowerBounds() {
     return lowerBounds;
@@ -142,6 +140,7 @@ public class Metrics implements Serializable {
 
   /**
   * Implements this method to enable serialization of ByteBuffers.
+   *
    * @param out The stream where to write
    * @throws IOException On serialization error
    */
@@ -156,8 +155,8 @@ public class Metrics implements Serializable {
     writeByteBufferMap(out, upperBounds);
   }
 
-  private static void writeByteBufferMap(ObjectOutputStream out, Map<Integer, ByteBuffer> byteBufferMap)
-      throws IOException {
+  private static void writeByteBufferMap(
+      ObjectOutputStream out, Map<Integer, ByteBuffer> byteBufferMap) throws IOException {
     if (byteBufferMap == null) {
       out.writeInt(-1);
 
@@ -175,6 +174,7 @@ public class Metrics implements Serializable {
 
   /**
   * Implements this method to enable deserialization of ByteBuffers.
+   *
    * @param in The stream to read from
    * @throws IOException On serialization error
    * @throws ClassNotFoundException If the class is not found
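A sketch (not part of this diff) of decoding the serialized bounds, following the Conversions pointer in the lowerBounds() Javadoc above; schema and metrics are assumed inputs:

    import java.nio.ByteBuffer;
    import java.util.Map;
    import org.apache.iceberg.Metrics;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.types.Conversions;

    class MetricsExample {
      // Convert each lower-bound ByteBuffer back to a value using the field's type.
      static void printLowerBounds(Schema schema, Metrics metrics) {
        Map<Integer, ByteBuffer> lowerBounds = metrics.lowerBounds();
        if (lowerBounds == null) {
          return;
        }
        for (Map.Entry<Integer, ByteBuffer> entry : lowerBounds.entrySet()) {
          Object value =
              Conversions.fromByteBuffer(schema.findType(entry.getKey()), entry.getValue());
          System.out.println("field " + entry.getKey() + " lower bound: " + value);
        }
      }
    }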
diff --git a/api/src/main/java/org/apache/iceberg/NullOrder.java b/api/src/main/java/org/apache/iceberg/NullOrder.java
index 195d4e9445..560649e37d 100644
--- a/api/src/main/java/org/apache/iceberg/NullOrder.java
+++ b/api/src/main/java/org/apache/iceberg/NullOrder.java
@@ -16,11 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 public enum NullOrder {
-  NULLS_FIRST, NULLS_LAST;
+  NULLS_FIRST,
+  NULLS_LAST;
 
   @Override
   public String toString() {
diff --git a/api/src/main/java/org/apache/iceberg/OverwriteFiles.java b/api/src/main/java/org/apache/iceberg/OverwriteFiles.java
index e86c99d558..c51f96485e 100644
--- a/api/src/main/java/org/apache/iceberg/OverwriteFiles.java
+++ b/api/src/main/java/org/apache/iceberg/OverwriteFiles.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.exceptions.ValidationException;
@@ -25,33 +24,33 @@ import org.apache.iceberg.expressions.Projections;
 
 /**
  * API for overwriting files in a table.
- * <p>
- * This API accumulates file additions and produces a new {@link Snapshot} of the table by replacing
- * all the deleted files with the set of additions. This operation is used to implement idempotent
- * writes that always replace a section of a table with new data or update/delete operations that
- * eagerly overwrite files.
- * <p>
- * Overwrites can be validated. The default validation mode is idempotent, meaning the overwrite is
- * correct and should be committed out regardless of other concurrent changes to the table.
- * For example, this can be used for replacing all the data for day D with query results.
- * Alternatively, this API can be configured for overwriting certain files with their filtered
- * versions while ensuring no new data that would need to be filtered has been added.
- * <p>
- * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
+ *
+ * <p>This API accumulates file additions and produces a new {@link Snapshot} of the table by
+ * replacing all the deleted files with the set of additions. This operation is used to implement
+ * idempotent writes that always replace a section of a table with new data or update/delete
+ * operations that eagerly overwrite files.
+ *
+ * <p>Overwrites can be validated. The default validation mode is idempotent, meaning the overwrite
+ * is correct and should be committed regardless of other concurrent changes to the table. For
+ * example, this can be used for replacing all the data for day D with query results. Alternatively,
+ * this API can be configured for overwriting certain files with their filtered versions while
+ * ensuring no new data that would need to be filtered has been added.
+ *
+ * <p>When committing, these changes will be applied to the latest table snapshot. Commit conflicts
  * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
  */
 public interface OverwriteFiles extends SnapshotUpdate<OverwriteFiles> {
   /**
    * Delete files that match an {@link Expression} on data rows from the table.
-   * <p>
-   * A file is selected to be deleted by the expression if it could contain any rows that match the
-   * expression (candidate files are selected using an
-   * {@link Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are
-   * deleted if all of the rows in the file must match the expression (the partition data matches
-   * the expression's {@link Projections#strict(PartitionSpec)} strict projection}). This guarantees
+   *
+   * <p>A file is selected to be deleted by the expression if it could contain any rows that match
+   * the expression (candidate files are selected using an {@link
+   * Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are deleted
+   * if all of the rows in the file must match the expression (the partition data matches the
+   * expression's {@link Projections#strict(PartitionSpec) strict projection}). This guarantees
    * that files are deleted if and only if all rows in the file must match the expression.
-   * <p>
-   * Files that may contain some rows that match the expression and some rows that do not will
+   *
+   * <p>Files that may contain some rows that match the expression and some rows that do not will
    * result in a {@link ValidationException}.
    *
    * @param expr an expression on rows in the table
@@ -78,9 +77,9 @@ public interface OverwriteFiles extends SnapshotUpdate<OverwriteFiles> {
 
   /**
    * Signal that each file added to the table must match the overwrite expression.
-   * <p>
-   * If this method is called, each added file is validated on commit to ensure that it matches the
-   * overwrite row filter. This is used to ensure that writes are idempotent: that files cannot
+   *
+   * <p>If this method is called, each added file is validated on commit to ensure that it matches
+   * the overwrite row filter. This is used to ensure that writes are idempotent: that files cannot
    * be added during a commit that would not be removed if the operation were run a second time.
    *
    * @return this for method chaining
@@ -89,9 +88,9 @@ public interface OverwriteFiles extends SnapshotUpdate<OverwriteFiles> {
 
   /**
    * Set the snapshot ID used in any reads for this operation.
-   * <p>
-   * Validations will check changes after this snapshot ID. If the from snapshot is not set, all ancestor snapshots
-   * through the table's initial snapshot are validated.
+   *
+   * <p>Validations will check changes after this snapshot ID. If the from snapshot is not set, all
+   * ancestor snapshots through the table's initial snapshot are validated.
    *
    * @param snapshotId a snapshot ID
    * @return this for method chaining
@@ -107,23 +106,25 @@ public interface OverwriteFiles extends SnapshotUpdate<OverwriteFiles> {
   OverwriteFiles caseSensitive(boolean caseSensitive);
 
   /**
-   * Enables validation that data files added concurrently do not conflict with this commit's operation.
-   * <p>
-   * This method should be called while committing non-idempotent overwrite operations.
-   * If a concurrent operation commits a new file after the data was read and that file might
-   * contain rows matching the specified conflict detection filter, the overwrite operation
-   * will detect this and fail.
-   * <p>
-   * Calling this method with a correct conflict detection filter is required to maintain
-   * serializable isolation for overwrite operations. Otherwise, the isolation level
-   * will be snapshot isolation.
-   * <p>
-   * Validation applies to files added to the table since the snapshot passed to {@link #validateFromSnapshot(long)}.
+   * Enables validation that data files added concurrently do not conflict with this commit's
+   * operation.
+   *
+   * <p>This method should be called while committing non-idempotent overwrite operations. If a
+   * concurrent operation commits a new file after the data was read and that file might contain
+   * rows matching the specified conflict detection filter, the overwrite operation will detect this
+   * and fail.
+   *
+   * <p>Calling this method with a correct conflict detection filter is required to maintain
+   * serializable isolation for overwrite operations. Otherwise, the isolation level will be
+   * snapshot isolation.
+   *
+   * <p>Validation applies to files added to the table since the snapshot passed to {@link
+   * #validateFromSnapshot(long)}.
    *
    * @param conflictDetectionFilter an expression on rows in the table
    * @return this for method chaining
-   * @deprecated since 0.13.0, will be removed in 0.14.0; use {@link #conflictDetectionFilter(Expression)} and
-   *             {@link #validateNoConflictingData()} instead.
+   * @deprecated since 0.13.0, will be removed in 0.14.0; use {@link
+   *     #conflictDetectionFilter(Expression)} and {@link #validateNoConflictingData()} instead.
    */
   @Deprecated
   default OverwriteFiles validateNoConflictingAppends(Expression conflictDetectionFilter) {
@@ -140,39 +141,41 @@ public interface OverwriteFiles extends SnapshotUpdate<OverwriteFiles> {
 
   /**
    * Enables validation that data added concurrently does not conflict with this commit's operation.
-   * <p>
-   * This method should be called while committing non-idempotent overwrite operations.
-   * If a concurrent operation commits a new file after the data was read and that file might
-   * contain rows matching the specified conflict detection filter, the overwrite operation
-   * will detect this and fail.
-   * <p>
-   * Calling this method with a correct conflict detection filter is required to maintain
+   *
+   * <p>This method should be called while committing non-idempotent overwrite operations. If a
+   * concurrent operation commits a new file after the data was read and that file might contain
+   * rows matching the specified conflict detection filter, the overwrite operation will detect this
+   * and fail.
+   *
+   * <p>Calling this method with a correct conflict detection filter is required to maintain
    * isolation for non-idempotent overwrite operations.
-   * <p>
-   * Validation uses the conflict detection filter passed to {@link #conflictDetectionFilter(Expression)} and
-   * applies to operations that happened after the snapshot passed to {@link #validateFromSnapshot(long)}.
-   * If the conflict detection filter is not set, any new data added concurrently will fail this
-   * overwrite operation.
+   *
+   * <p>Validation uses the conflict detection filter passed to {@link
+   * #conflictDetectionFilter(Expression)} and applies to operations that happened after the
+   * snapshot passed to {@link #validateFromSnapshot(long)}. If the conflict detection filter is not
+   * set, any new data added concurrently will fail this overwrite operation.
    *
    * @return this for method chaining
    */
   OverwriteFiles validateNoConflictingData();
 
   /**
-   * Enables validation that deletes that happened concurrently do not conflict with this commit's operation.
-   * <p>
-   * Validating concurrent deletes is required during non-idempotent overwrite operations.
-   * If a concurrent operation deletes data in one of the files being overwritten, the overwrite
+   * Enables validation that deletes that happened concurrently do not conflict with this commit's
+   * operation.
+   *
+   * <p>Validating concurrent deletes is required during non-idempotent overwrite operations. If a
+   * concurrent operation deletes data in one of the files being overwritten, the overwrite
    * operation must be aborted as it may undelete rows that were removed concurrently.
-   * <p>
-   * Calling this method with a correct conflict detection filter is required to maintain
+   *
+   * <p>Calling this method with a correct conflict detection filter is required to maintain
    * isolation for non-idempotent overwrite operations.
-   * <p>
-   * Validation uses the conflict detection filter passed to {@link #conflictDetectionFilter(Expression)} and
-   * applies to operations that happened after the snapshot passed to {@link #validateFromSnapshot(long)}.
-   * If the conflict detection filter is not set, this operation will use the row filter provided
-   * in {@link #overwriteByRowFilter(Expression)} to check for new delete files and will ensure
-   * there are no conflicting deletes for data files removed via {@link #deleteFile(DataFile)}.
+   *
+   * <p>Validation uses the conflict detection filter passed to {@link
+   * #conflictDetectionFilter(Expression)} and applies to operations that happened after the
+   * snapshot passed to {@link #validateFromSnapshot(long)}. If the conflict detection filter is not
+   * set, this operation will use the row filter provided in {@link
+   * #overwriteByRowFilter(Expression)} to check for new delete files and will ensure there are no
+   * conflicting deletes for data files removed via {@link #deleteFile(DataFile)}.
    *
    * @return this for method chaining
    */
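A hedged end-to-end sketch (not part of this diff) of the non-idempotent overwrite validation described above; table, newFile, readSnapshotId, and the day predicate are placeholders:

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.expressions.Expression;
    import org.apache.iceberg.expressions.Expressions;

    class OverwriteExample {
      // Replace one day of data and fail the commit if conflicting data or
      // deletes were added after the snapshot this writer read from.
      static void replaceDay(Table table, DataFile newFile, long readSnapshotId) {
        Expression dayFilter = Expressions.equal("day", "2022-08-14");
        table.newOverwrite()
            .overwriteByRowFilter(dayFilter)
            .addFile(newFile)
            .validateFromSnapshot(readSnapshotId)
            .conflictDetectionFilter(dayFilter)
            .validateNoConflictingData()
            .validateNoConflictingDeletes()
            .commit();
      }
    }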
diff --git a/api/src/main/java/org/apache/iceberg/PartitionField.java b/api/src/main/java/org/apache/iceberg/PartitionField.java
index 2b97bdfa91..5956e01d7b 100644
--- a/api/src/main/java/org/apache/iceberg/PartitionField.java
+++ b/api/src/main/java/org/apache/iceberg/PartitionField.java
@@ -16,16 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
 import org.apache.iceberg.relocated.com.google.common.base.Objects;
 import org.apache.iceberg.transforms.Transform;
 
-/**
- * Represents a single field in a {@link PartitionSpec}.
- */
+/** Represents a single field in a {@link PartitionSpec}. */
 public class PartitionField implements Serializable {
   private final int sourceId;
   private final int fieldId;
@@ -39,30 +36,22 @@ public class PartitionField implements Serializable {
     this.transform = transform;
   }
 
-  /**
-   * Returns the field id of the source field in the {@link PartitionSpec spec's} table schema.
-   */
+  /** Returns the field id of the source field in the {@link PartitionSpec spec's} table schema. */
   public int sourceId() {
     return sourceId;
   }
 
-  /**
-   * Returns the partition field id across all the table metadata's partition specs.
-   */
+  /** Returns the partition field id across all the table metadata's partition specs. */
   public int fieldId() {
     return fieldId;
   }
 
-  /**
-   * Returns the name of this partition field.
-   */
+  /** Returns the name of this partition field. */
   public String name() {
     return name;
   }
 
-  /**
-   * Returns the transform used to produce partition values from source values.
-   */
+  /** Returns the transform used to produce partition values from source values. */
   public Transform<?, ?> transform() {
     return transform;
   }
@@ -81,10 +70,10 @@ public class PartitionField implements Serializable {
     }
 
     PartitionField that = (PartitionField) other;
-    return sourceId == that.sourceId &&
-        fieldId == that.fieldId &&
-        name.equals(that.name) &&
-        transform.equals(that.transform);
+    return sourceId == that.sourceId
+        && fieldId == that.fieldId
+        && name.equals(that.name)
+        && transform.equals(that.transform);
   }
 
   @Override
diff --git a/api/src/main/java/org/apache/iceberg/PartitionKey.java b/api/src/main/java/org/apache/iceberg/PartitionKey.java
index 71cdb2756e..0f696b59c4 100644
--- a/api/src/main/java/org/apache/iceberg/PartitionKey.java
+++ b/api/src/main/java/org/apache/iceberg/PartitionKey.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
@@ -28,8 +27,9 @@ import org.apache.iceberg.transforms.Transform;
 
 /**
  * A struct of partition values.
- * <p>
- * Instances of this class can produce partition values from a data row passed to {@link #partition(StructLike)}.
+ *
+ * <p>Instances of this class can produce partition values from a data row passed to {@link
+ * #partition(StructLike)}.
  */
 public class PartitionKey implements StructLike, Serializable {
 
@@ -53,7 +53,8 @@ public class PartitionKey implements StructLike, Serializable {
     for (int i = 0; i < size; i += 1) {
       PartitionField field = fields.get(i);
       Accessor<StructLike> accessor = inputSchema.accessorForField(field.sourceId());
-      Preconditions.checkArgument(accessor != null,
+      Preconditions.checkArgument(
+          accessor != null,
           "Cannot build accessor for field: " + schema.findField(field.sourceId()));
       this.accessors[i] = accessor;
       this.transforms[i] = field.transform();
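A short sketch (not part of this diff) of producing partition values with the class above; spec, schema, and row are assumed inputs:

    import org.apache.iceberg.PartitionKey;
    import org.apache.iceberg.PartitionSpec;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.StructLike;

    class PartitionKeyExample {
      // A writer would typically reuse one PartitionKey and re-partition it per row.
      static PartitionKey keyFor(PartitionSpec spec, Schema schema, StructLike row) {
        PartitionKey key = new PartitionKey(spec, schema);
        key.partition(row); // fills the key's values from the data row
        return key;
      }
    }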
diff --git a/api/src/main/java/org/apache/iceberg/PartitionSpec.java b/api/src/main/java/org/apache/iceberg/PartitionSpec.java
index 288445f82a..e984fc69d8 100644
--- a/api/src/main/java/org/apache/iceberg/PartitionSpec.java
+++ b/api/src/main/java/org/apache/iceberg/PartitionSpec.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
@@ -45,8 +44,8 @@ import org.apache.iceberg.types.Types.StructType;
 
 /**
  * Represents how to produce partition data for a table.
- * <p>
- * Partition data is produced by transforming columns in a table. Each column transform is
+ *
+ * <p>Partition data is produced by transforming columns in a table. Each column transform is
  * represented by a named {@link PartitionField}.
  */
 public class PartitionSpec implements Serializable {
@@ -63,7 +62,8 @@ public class PartitionSpec implements Serializable {
   private transient volatile List<PartitionField> fieldList = null;
   private final int lastAssignedFieldId;
 
-  private PartitionSpec(Schema schema, int specId, List<PartitionField> fields, int lastAssignedFieldId) {
+  private PartitionSpec(
+      Schema schema, int specId, List<PartitionField> fields, int lastAssignedFieldId) {
     this.schema = schema;
     this.specId = specId;
     this.fields = new PartitionField[fields.size()];
@@ -73,23 +73,17 @@ public class PartitionSpec implements Serializable {
     this.lastAssignedFieldId = lastAssignedFieldId;
   }
 
-  /**
-   * Returns the {@link Schema} for this spec.
-   */
+  /** Returns the {@link Schema} for this spec. */
   public Schema schema() {
     return schema;
   }
 
-  /**
-   * Returns the ID of this spec.
-   */
+  /** Returns the ID of this spec. */
   public int specId() {
     return specId;
   }
 
-  /**
-   * Returns the list of {@link PartitionField partition fields} for this spec.
-   */
+  /** Returns the list of {@link PartitionField partition fields} for this spec. */
   public List<PartitionField> fields() {
     return lazyFieldList();
   }
@@ -110,7 +104,8 @@ public class PartitionSpec implements Serializable {
     UnboundPartitionSpec.Builder builder = UnboundPartitionSpec.builder().withSpecId(specId);
 
     for (PartitionField field : fields) {
-      builder.addField(field.transform().toString(), field.sourceId(), field.fieldId(), field.name());
+      builder.addField(
+          field.transform().toString(), field.sourceId(), field.fieldId(), field.name());
     }
 
     return builder.build();
@@ -126,9 +121,7 @@ public class PartitionSpec implements Serializable {
     return lazyFieldsBySourceId().get(fieldId);
   }
 
-  /**
-   * Returns a {@link StructType} for partition data defined by this spec.
-   */
+  /** Returns a {@link StructType} for partition data defined by this spec. */
   public StructType partitionType() {
     List<Types.NestedField> structFields = Lists.newArrayListWithExpectedSize(fields.length);
 
@@ -136,8 +129,7 @@ public class PartitionSpec implements Serializable {
       PartitionField field = fields[i];
       Type sourceType = schema.findType(field.sourceId());
       Type resultType = field.transform().getResultType(sourceType);
-      structFields.add(
-          Types.NestedField.optional(field.fieldId(), field.name(), resultType));
+      structFields.add(Types.NestedField.optional(field.fieldId(), field.name(), resultType));
     }
 
     return Types.StructType.of(structFields);
@@ -196,8 +188,9 @@ public class PartitionSpec implements Serializable {
   }
 
   /**
-   * Returns true if this spec is equivalent to the other, with partition field ids ignored.
-   * That is, if both specs have the same number of fields, field order, field name, source columns, and transforms.
+   * Returns true if this spec is equivalent to the other, with partition field IDs ignored. That
+   * is, if both specs have the same number of fields, field order, field names, source columns,
+   * and transforms.
    *
    * @param other another PartitionSpec
    * @return true if the specs have the same fields, source columns, and transforms.
@@ -214,9 +207,9 @@ public class PartitionSpec implements Serializable {
     for (int i = 0; i < fields.length; i += 1) {
       PartitionField thisField = fields[i];
       PartitionField thatField = other.fields[i];
-      if (thisField.sourceId() != thatField.sourceId() ||
-          !thisField.transform().toString().equals(thatField.transform().toString()) ||
-          !thisField.name().equals(thatField.name())) {
+      if (thisField.sourceId() != thatField.sourceId()
+          || !thisField.transform().toString().equals(thatField.transform().toString())
+          || !thisField.name().equals(thatField.name())) {
         return false;
       }
     }
@@ -259,8 +252,9 @@ public class PartitionSpec implements Serializable {
     if (fieldsBySourceId == null) {
       synchronized (this) {
         if (fieldsBySourceId == null) {
-          ListMultimap<Integer, PartitionField> multiMap = Multimaps
-              .newListMultimap(Maps.newHashMap(), () -> Lists.newArrayListWithCapacity(fields.length));
+          ListMultimap<Integer, PartitionField> multiMap =
+              Multimaps.newListMultimap(
+                  Maps.newHashMap(), () -> Lists.newArrayListWithCapacity(fields.length));
           for (PartitionField field : fields) {
             multiMap.put(field.sourceId(), field);
           }
@@ -331,8 +325,8 @@ public class PartitionSpec implements Serializable {
 
   /**
    * Used to create valid {@link PartitionSpec partition specs}.
-   * <p>
-   * Call {@link #builderFor(Schema)} to create a new builder.
+   *
+   * <p>Call {@link #builderFor(Schema)} to create a new builder.
    */
   public static class Builder {
     private final Schema schema;
@@ -340,7 +334,8 @@ public class PartitionSpec implements Serializable {
     private final Set<String> partitionNames = Sets.newHashSet();
     private Map<Map.Entry<Integer, String>, PartitionField> dedupFields = Maps.newHashMap();
     private int specId = 0;
-    private final AtomicInteger lastAssignedFieldId = new AtomicInteger(unpartitionedLastAssignedId());
+    private final AtomicInteger lastAssignedFieldId =
+        new AtomicInteger(unpartitionedLastAssignedId());
     // check if there are conflicts between partition and schema field name
     private boolean checkConflicts = true;
 
@@ -365,29 +360,38 @@ public class PartitionSpec implements Serializable {
       Types.NestedField schemaField = schema.findField(name);
       if (checkConflicts) {
         if (sourceColumnId != null) {
-          // for identity transform case we allow conflicts between partition and schema field name as
+          // for identity transform case we allow conflicts between partition and schema field name
+          // as
           //   long as they are sourced from the same schema field
-          Preconditions.checkArgument(schemaField == null || schemaField.fieldId() == sourceColumnId,
-              "Cannot create identity partition sourced from different field in schema: %s", name);
+          Preconditions.checkArgument(
+              schemaField == null || schemaField.fieldId() == sourceColumnId,
+              "Cannot create identity partition sourced from different field in schema: %s",
+              name);
         } else {
-          // for all other transforms we don't allow conflicts between partition name and schema field name
-          Preconditions.checkArgument(schemaField == null,
-              "Cannot create partition from name that exists in schema: %s", name);
+          // for all other transforms we don't allow conflicts between partition name and schema
+          // field name
+          Preconditions.checkArgument(
+              schemaField == null,
+              "Cannot create partition from name that exists in schema: %s",
+              name);
         }
       }
-      Preconditions.checkArgument(name != null && !name.isEmpty(),
-          "Cannot use empty or null partition name: %s", name);
-      Preconditions.checkArgument(!partitionNames.contains(name),
-          "Cannot use partition name more than once: %s", name);
+      Preconditions.checkArgument(
+          name != null && !name.isEmpty(), "Cannot use empty or null partition name: %s", name);
+      Preconditions.checkArgument(
+          !partitionNames.contains(name), "Cannot use partition name more than once: %s", name);
       partitionNames.add(name);
     }
 
     private void checkForRedundantPartitions(PartitionField field) {
-      Map.Entry<Integer, String> dedupKey = new AbstractMap.SimpleEntry<>(
-          field.sourceId(), field.transform().dedupName());
+      Map.Entry<Integer, String> dedupKey =
+          new AbstractMap.SimpleEntry<>(field.sourceId(), field.transform().dedupName());
       PartitionField partitionField = dedupFields.get(dedupKey);
-      Preconditions.checkArgument(partitionField == null,
-          "Cannot add redundant partition: %s conflicts with %s", partitionField, field);
+      Preconditions.checkArgument(
+          partitionField == null,
+          "Cannot add redundant partition: %s conflicts with %s",
+          partitionField,
+          field);
       dedupFields.put(dedupKey, field);
     }
 
@@ -398,15 +402,20 @@ public class PartitionSpec implements Serializable {
 
     private Types.NestedField findSourceColumn(String sourceName) {
       Types.NestedField sourceColumn = schema.findField(sourceName);
-      Preconditions.checkArgument(sourceColumn != null, "Cannot find source column: %s", sourceName);
+      Preconditions.checkArgument(
+          sourceColumn != null, "Cannot find source column: %s", sourceName);
       return sourceColumn;
     }
 
     Builder identity(String sourceName, String targetName) {
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
       checkAndAddPartitionName(targetName, sourceColumn.fieldId());
-      PartitionField field = new PartitionField(
-          sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.identity(sourceColumn.type()));
+      PartitionField field =
+          new PartitionField(
+              sourceColumn.fieldId(),
+              nextFieldId(),
+              targetName,
+              Transforms.identity(sourceColumn.type()));
       checkForRedundantPartitions(field);
       fields.add(field);
       return this;
@@ -419,8 +428,12 @@ public class PartitionSpec implements Serializable {
     public Builder year(String sourceName, String targetName) {
       checkAndAddPartitionName(targetName);
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
-      PartitionField field = new PartitionField(
-          sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.year(sourceColumn.type()));
+      PartitionField field =
+          new PartitionField(
+              sourceColumn.fieldId(),
+              nextFieldId(),
+              targetName,
+              Transforms.year(sourceColumn.type()));
       checkForRedundantPartitions(field);
       fields.add(field);
       return this;
@@ -433,8 +446,12 @@ public class PartitionSpec implements Serializable {
     public Builder month(String sourceName, String targetName) {
       checkAndAddPartitionName(targetName);
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
-      PartitionField field = new PartitionField(
-          sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.month(sourceColumn.type()));
+      PartitionField field =
+          new PartitionField(
+              sourceColumn.fieldId(),
+              nextFieldId(),
+              targetName,
+              Transforms.month(sourceColumn.type()));
       checkForRedundantPartitions(field);
       fields.add(field);
       return this;
@@ -447,8 +464,12 @@ public class PartitionSpec implements Serializable {
     public Builder day(String sourceName, String targetName) {
       checkAndAddPartitionName(targetName);
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
-      PartitionField field = new PartitionField(
-          sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.day(sourceColumn.type()));
+      PartitionField field =
+          new PartitionField(
+              sourceColumn.fieldId(),
+              nextFieldId(),
+              targetName,
+              Transforms.day(sourceColumn.type()));
       checkForRedundantPartitions(field);
       fields.add(field);
       return this;
@@ -461,8 +482,12 @@ public class PartitionSpec implements Serializable {
     public Builder hour(String sourceName, String targetName) {
       checkAndAddPartitionName(targetName);
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
-      PartitionField field = new PartitionField(
-          sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.hour(sourceColumn.type()));
+      PartitionField field =
+          new PartitionField(
+              sourceColumn.fieldId(),
+              nextFieldId(),
+              targetName,
+              Transforms.hour(sourceColumn.type()));
       checkForRedundantPartitions(field);
       fields.add(field);
       return this;
@@ -475,8 +500,12 @@ public class PartitionSpec implements Serializable {
     public Builder bucket(String sourceName, int numBuckets, String targetName) {
       checkAndAddPartitionName(targetName);
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
-      fields.add(new PartitionField(
-          sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.bucket(sourceColumn.type(), numBuckets)));
+      fields.add(
+          new PartitionField(
+              sourceColumn.fieldId(),
+              nextFieldId(),
+              targetName,
+              Transforms.bucket(sourceColumn.type(), numBuckets)));
       return this;
     }
 
@@ -487,8 +516,12 @@ public class PartitionSpec implements Serializable {
     public Builder truncate(String sourceName, int width, String targetName) {
       checkAndAddPartitionName(targetName);
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
-      fields.add(new PartitionField(
-          sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.truncate(sourceColumn.type(), width)));
+      fields.add(
+          new PartitionField(
+              sourceColumn.fieldId(),
+              nextFieldId(),
+              targetName,
+              Transforms.truncate(sourceColumn.type(), width)));
       return this;
     }
 
@@ -498,8 +531,11 @@ public class PartitionSpec implements Serializable {
 
     public Builder alwaysNull(String sourceName, String targetName) {
       Types.NestedField sourceColumn = findSourceColumn(sourceName);
-      checkAndAddPartitionName(targetName, sourceColumn.fieldId()); // can duplicate a source column name
-      fields.add(new PartitionField(sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.alwaysNull()));
+      checkAndAddPartitionName(
+          targetName, sourceColumn.fieldId()); // can duplicate a source column name
+      fields.add(
+          new PartitionField(
+              sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.alwaysNull()));
       return this;
     }
 
@@ -507,7 +543,8 @@ public class PartitionSpec implements Serializable {
       return alwaysNull(sourceName, sourceName + "_null");
     }
 
-    // add a partition field with an auto-increment partition field id starting from PARTITION_DATA_ID_START
+    // add a partition field with an auto-increment partition field id starting from
+    // PARTITION_DATA_ID_START
     Builder add(int sourceId, String name, String transform) {
       return add(sourceId, nextFieldId(), name, transform);
     }
@@ -539,14 +576,17 @@ public class PartitionSpec implements Serializable {
   static void checkCompatibility(PartitionSpec spec, Schema schema) {
     for (PartitionField field : spec.fields) {
       Type sourceType = schema.findType(field.sourceId());
-      ValidationException.check(sourceType != null,
-          "Cannot find source column for partition field: %s", field);
-      ValidationException.check(sourceType.isPrimitiveType(),
-          "Cannot partition by non-primitive source field: %s", sourceType);
+      ValidationException.check(
+          sourceType != null, "Cannot find source column for partition field: %s", field);
+      ValidationException.check(
+          sourceType.isPrimitiveType(),
+          "Cannot partition by non-primitive source field: %s",
+          sourceType);
       ValidationException.check(
           field.transform().canTransform(sourceType),
           "Invalid source type %s for transform: %s",
-          sourceType, field.transform());
+          sourceType,
+          field.transform());
     }
   }
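A builder sketch (not part of this diff) tying together the day/bucket methods shown above, over a small example schema:

    import org.apache.iceberg.PartitionSpec;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.types.Types;

    class SpecExample {
      // Builds the spec [ ts_day=date(ts), id_bucket=bucket(id, 16) ].
      static PartitionSpec example() {
        Schema schema =
            new Schema(
                Types.NestedField.required(1, "id", Types.LongType.get()),
                Types.NestedField.required(2, "ts", Types.TimestampType.withZone()));
        return PartitionSpec.builderFor(schema)
            .day("ts", "ts_day")
            .bucket("id", 16, "id_bucket")
            .build();
      }
    }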
 
diff --git a/api/src/main/java/org/apache/iceberg/PendingUpdate.java b/api/src/main/java/org/apache/iceberg/PendingUpdate.java
index 9c1b184345..f47b98238d 100644
--- a/api/src/main/java/org/apache/iceberg/PendingUpdate.java
+++ b/api/src/main/java/org/apache/iceberg/PendingUpdate.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.exceptions.CommitFailedException;
@@ -32,8 +31,8 @@ public interface PendingUpdate<T> {
 
   /**
    * Apply the pending changes and return the uncommitted changes for validation.
-   * <p>
-   * This does not result in a permanent update.
+   *
+   * <p>This does not result in a permanent update.
    *
    * @return the uncommitted changes that would be committed by calling {@link #commit()}
    * @throws ValidationException If the pending changes cannot be applied to the current metadata
@@ -43,20 +42,21 @@ public interface PendingUpdate<T> {
 
   /**
    * Apply the pending changes and commit.
-   * <p>
-   * Changes are committed by calling the underlying table's commit method.
-   * <p>
-   * Once the commit is successful, the updated table will be refreshed.
+   *
+   * <p>Changes are committed by calling the underlying table's commit method.
+   *
+   * <p>Once the commit is successful, the updated table will be refreshed.
    *
    * @throws ValidationException If the update cannot be applied to the current table metadata.
    * @throws CommitFailedException If the update cannot be committed due to conflicts.
-   * @throws CommitStateUnknownException If the update success or failure is unknown, no cleanup should be done in
-   * this case.
+   * @throws CommitStateUnknownException If the update success or failure is unknown; no cleanup
+   *     should be done in this case.
    */
   void commit();
 
   /**
    * Generates an update event to notify about metadata changes.
+   *
    * @return the generated event
    */
   default Object updateEvent() {
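
As a usage sketch of the apply-then-commit contract documented above, assuming
the Table and DataFile come from the caller (AppendFiles is one
PendingUpdate<Snapshot> implementation):

    import org.apache.iceberg.AppendFiles;
    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Snapshot;
    import org.apache.iceberg.Table;

    public class PendingUpdateExample {
      static void appendWithValidation(Table table, DataFile file) {
        AppendFiles append = table.newAppend().appendFile(file);

        // apply() stages the change and returns the uncommitted Snapshot;
        // nothing is permanent yet.
        Snapshot uncommitted = append.apply();
        System.out.println("Would commit snapshot: " + uncommitted.snapshotId());

        // commit() performs the permanent update and refreshes the table; it may
        // throw CommitFailedException on conflicts.
        append.commit();
      }
    }
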
diff --git a/api/src/main/java/org/apache/iceberg/ReplacePartitions.java b/api/src/main/java/org/apache/iceberg/ReplacePartitions.java
index fdf7ea1d19..7e8ab65304 100644
--- a/api/src/main/java/org/apache/iceberg/ReplacePartitions.java
+++ b/api/src/main/java/org/apache/iceberg/ReplacePartitions.java
@@ -16,28 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 /**
  * API for overwriting files in a table by partition.
- * <p>
- * This is provided to implement SQL compatible with Hive table operations but is not recommended.
- * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
- * <p>
- * The default validation mode is idempotent, meaning the overwrite is
- * correct and should be committed out regardless of other concurrent changes to the table.
- * Alternatively, this API can be configured to validate that no new data or deletes
- * have been applied since a snapshot ID associated when this operation began.
- * This can be done by calling {@link #validateNoConflictingDeletes()}, {@link #validateNoConflictingData()},
- * to ensure that no conflicting delete files or data files respectively have been written since the snapshot
- * passed to {@link #validateFromSnapshot(long)}.
- * <p>
- * This API accumulates file additions and produces a new {@link Snapshot} of the table by replacing
- * all files in partitions with new data with the new additions. This operation is used to implement
- * dynamic partition replacement.
- * <p>
- * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
+ *
+ * <p>This is provided to implement SQL compatible with Hive table operations but is not
+ * recommended. Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
+ *
+ * <p>The default validation mode is idempotent, meaning the overwrite is correct and should be
+ * committed regardless of other concurrent changes to the table. Alternatively, this API can be
+ * configured to validate that no new data or deletes have been applied since a snapshot ID
+ * captured when this operation began. This can be done by calling {@link
+ * #validateNoConflictingDeletes()} and {@link #validateNoConflictingData()} to ensure that no
+ * conflicting delete files or data files respectively have been written since the snapshot passed
+ * to {@link #validateFromSnapshot(long)}.
+ *
+ * <p>This API accumulates file additions and produces a new {@link Snapshot} of the table by
+ * replacing all files in each partition that has new data with the new additions. This operation is used to
+ * implement dynamic partition replacement.
+ *
+ * <p>When committing, these changes will be applied to the latest table snapshot. Commit conflicts
  * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
  */
 public interface ReplacePartitions extends SnapshotUpdate<ReplacePartitions> {
@@ -59,24 +58,27 @@ public interface ReplacePartitions extends SnapshotUpdate<ReplacePartitions> {
   /**
    * Set the snapshot ID used in validations for this operation.
    *
-   * All validations will check changes after this snapshot ID. If this is not called, validation will occur
-   * from the beginning of the table's history.
+   * <p>All validations will check changes after this snapshot ID. If this is not called, validation
+   * will occur from the beginning of the table's history.
    *
-   * This method should be called before this operation is committed.
-   * If a concurrent operation committed a data or delta file or removed a data file after the given snapshot ID
-   * that might contain rows matching a partition marked for deletion, validation will detect this and fail.
+   * <p>This method should be called before this operation is committed. If a concurrent operation
+   * committed a data or delta file or removed a data file after the given snapshot ID that might
+   * contain rows matching a partition marked for deletion, validation will detect this and fail.
    *
-   * @param snapshotId a snapshot ID, it should be set to when this operation started to read the table.
+   * @param snapshotId a snapshot ID; it should be the ID of the snapshot current when this
+   *     operation started to read the table.
    * @return this for method chaining
    */
   ReplacePartitions validateFromSnapshot(long snapshotId);
 
   /**
-   * Enables validation that deletes that happened concurrently do not conflict with this commit's operation.
-   * <p>
-   * Validating concurrent deletes is required during non-idempotent replace partition operations.
-   * This will check if a concurrent operation deletes data in any of the partitions being overwritten,
-   * as the replace partition must be aborted to avoid undeleting rows that were removed concurrently.
+   * Enables validation that deletes that happened concurrently do not conflict with this commit's
+   * operation.
+   *
+   * <p>Validating concurrent deletes is required during non-idempotent replace partition
+   * operations. This will check if a concurrent operation deletes data in any of the partitions
+   * being overwritten, as the replace partition must be aborted to avoid undeleting rows that were
+   * removed concurrently.
    *
    * @return this for method chaining
    */
@@ -84,10 +86,11 @@ public interface ReplacePartitions extends SnapshotUpdate<ReplacePartitions> {
 
   /**
    * Enables validation that data added concurrently does not conflict with this commit's operation.
-   * <p>
-   * Validating concurrent data files is required during non-idempotent replace partition operations.
-   * This will check if a concurrent operation inserts data in any of the partitions being overwritten,
-   * as the replace partition must be aborted to avoid removing rows added concurrently.
+   *
+   * <p>Validating concurrent data files is required during non-idempotent replace partition
+   * operations. This will check if a concurrent operation inserts data in any of the partitions
+   * being overwritten, as the replace partition must be aborted to avoid removing rows added
+   * concurrently.
    *
    * @return this for method chaining
    */
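
A short sketch of the validations documented above; the table, file, and
starting snapshot ID are assumed to be supplied by the caller:

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Table;

    public class ReplacePartitionsExample {
      static void replace(Table table, DataFile newFile, long startSnapshotId) {
        table
            .newReplacePartitions()
            .addFile(newFile) // partitions touched by added files are replaced
            .validateFromSnapshot(startSnapshotId) // check changes after this snapshot
            .validateNoConflictingData() // fail on concurrent inserts to those partitions
            .validateNoConflictingDeletes() // fail on concurrent deletes in those partitions
            .commit();
      }
    }
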
diff --git a/api/src/main/java/org/apache/iceberg/ReplaceSortOrder.java b/api/src/main/java/org/apache/iceberg/ReplaceSortOrder.java
index 825c86b1de..0b63fdd515 100644
--- a/api/src/main/java/org/apache/iceberg/ReplaceSortOrder.java
+++ b/api/src/main/java/org/apache/iceberg/ReplaceSortOrder.java
@@ -16,18 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 /**
  * API for replacing table sort order with a newly created order.
- * <p>
- * The table sort order is used to sort incoming records in engines that can request an ordering.
- * <p>
- * Apply returns the new sort order for validation.
- * <p>
- * When committing, these changes will be applied to the current table metadata. Commit conflicts
+ *
+ * <p>The table sort order is used to sort incoming records in engines that can request an ordering.
+ *
+ * <p>Apply returns the new sort order for validation.
+ *
+ * <p>When committing, these changes will be applied to the current table metadata. Commit conflicts
  * will be resolved by applying the pending changes to the new table metadata.
  */
-public interface ReplaceSortOrder extends PendingUpdate<SortOrder>, SortOrderBuilder<ReplaceSortOrder> {
-}
+public interface ReplaceSortOrder
+    extends PendingUpdate<SortOrder>, SortOrderBuilder<ReplaceSortOrder> {}
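
As a usage sketch (the column names are hypothetical; asc and desc come from
SortOrderBuilder):

    import org.apache.iceberg.Table;

    public class ReplaceSortOrderExample {
      static void setOrder(Table table) {
        // Replaces the table sort order; engines that honor orderings will sort
        // incoming records by "category" ascending, then "ts" descending.
        table.replaceSortOrder().asc("category").desc("ts").commit();
      }
    }
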
diff --git a/api/src/main/java/org/apache/iceberg/RewriteFiles.java b/api/src/main/java/org/apache/iceberg/RewriteFiles.java
index 1e13a534f6..c392c7118d 100644
--- a/api/src/main/java/org/apache/iceberg/RewriteFiles.java
+++ b/api/src/main/java/org/apache/iceberg/RewriteFiles.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Set;
@@ -25,11 +24,11 @@ import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
 
 /**
  * API for replacing files in a table.
- * <p>
- * This API accumulates file additions and deletions, produces a new {@link Snapshot} of the
+ *
+ * <p>This API accumulates file additions and deletions, produces a new {@link Snapshot} of the
  * changes, and commits that snapshot as the current.
- * <p>
- * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
+ *
+ * <p>When committing, these changes will be applied to the latest table snapshot. Commit conflicts
  * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
  * If any of the deleted files are no longer in the latest snapshot when reattempting, the commit
  * will throw a {@link ValidationException}.
@@ -39,46 +38,45 @@ public interface RewriteFiles extends SnapshotUpdate<RewriteFiles> {
    * Add a rewrite that replaces one set of data files with another set that contains the same data.
    *
    * @param filesToDelete files that will be replaced (deleted), cannot be null or empty.
-   * @param filesToAdd    files that will be added, cannot be null or empty.
+   * @param filesToAdd files that will be added, cannot be null or empty.
    * @return this for method chaining
    */
   default RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd) {
-    return rewriteFiles(
-        filesToDelete,
-        ImmutableSet.of(),
-        filesToAdd,
-        ImmutableSet.of()
-    );
+    return rewriteFiles(filesToDelete, ImmutableSet.of(), filesToAdd, ImmutableSet.of());
   }
 
   /**
    * Add a rewrite that replaces one set of data files with another set that contains the same data.
    * The sequence number provided will be used for all the data files added.
    *
-   * @param filesToDelete  files that will be replaced (deleted), cannot be null or empty.
-   * @param filesToAdd     files that will be added, cannot be null or empty.
+   * @param filesToDelete files that will be replaced (deleted), cannot be null or empty.
+   * @param filesToAdd files that will be added, cannot be null or empty.
    * @param sequenceNumber sequence number to use for all data files added
    * @return this for method chaining
    */
-  RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd, long sequenceNumber);
+  RewriteFiles rewriteFiles(
+      Set<DataFile> filesToDelete, Set<DataFile> filesToAdd, long sequenceNumber);
 
   /**
    * Add a rewrite that replaces one set of files with another set that contains the same data.
    *
-   * @param dataFilesToReplace   data files that will be replaced (deleted).
+   * @param dataFilesToReplace data files that will be replaced (deleted).
    * @param deleteFilesToReplace delete files that will be replaced (deleted).
-   * @param dataFilesToAdd      data files that will be added.
-   * @param deleteFilesToAdd    delete files that will be added.
+   * @param dataFilesToAdd data files that will be added.
+   * @param deleteFilesToAdd delete files that will be added.
    * @return this for method chaining.
    */
-  RewriteFiles rewriteFiles(Set<DataFile> dataFilesToReplace, Set<DeleteFile> deleteFilesToReplace,
-                            Set<DataFile> dataFilesToAdd, Set<DeleteFile> deleteFilesToAdd);
+  RewriteFiles rewriteFiles(
+      Set<DataFile> dataFilesToReplace,
+      Set<DeleteFile> deleteFilesToReplace,
+      Set<DataFile> dataFilesToAdd,
+      Set<DeleteFile> deleteFilesToAdd);
 
   /**
    * Set the snapshot ID used in any reads for this operation.
-   * <p>
-   * Validations will check changes after this snapshot ID. If this is not called, all ancestor snapshots through the
-   * table's initial snapshot are validated.
+   *
+   * <p>Validations will check changes after this snapshot ID. If this is not called, all ancestor
+   * snapshots through the table's initial snapshot are validated.
    *
    * @param snapshotId a snapshot ID
    * @return this for method chaining
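
A minimal compaction-style sketch of the two-set rewriteFiles overload above;
the data files are assumed to be prepared by the caller:

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;

    public class RewriteFilesExample {
      static void compact(Table table, DataFile small1, DataFile small2, DataFile compacted) {
        // Replaces two small files with one rewritten file containing the same rows.
        table
            .newRewrite()
            .rewriteFiles(ImmutableSet.of(small1, small2), ImmutableSet.of(compacted))
            .commit();
      }
    }
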
diff --git a/api/src/main/java/org/apache/iceberg/RewriteJobOrder.java b/api/src/main/java/org/apache/iceberg/RewriteJobOrder.java
index 3b47dff782..2face482a5 100644
--- a/api/src/main/java/org/apache/iceberg/RewriteJobOrder.java
+++ b/api/src/main/java/org/apache/iceberg/RewriteJobOrder.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Locale;
@@ -25,17 +24,25 @@ import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
 /**
  * Enum of supported rewrite job order; it defines the order in which the file groups should be
  * written.
- * <p><ul>
- * <li> bytes-asc: rewrite the smallest job groups first.
- * <li> bytes-desc: rewrite the largest job groups first.
- * <li> files-asc: rewrite the job groups with the least files first.
- * <li> files-desc: rewrite the job groups with the most files first.
- * <li> none: rewrite job groups in the order they were planned (no specific ordering).
- * </ul><p>
+ *
+ * <p>
+ *
+ * <ul>
+ *   <li>bytes-asc: rewrite the smallest job groups first.
+ *   <li>bytes-desc: rewrite the largest job groups first.
+ *   <li>files-asc: rewrite the job groups with the fewest files first.
+ *   <li>files-desc: rewrite the job groups with the most files first.
+ *   <li>none: rewrite job groups in the order they were planned (no specific ordering).
+ * </ul>
+ *
+ * <p>
  */
 public enum RewriteJobOrder {
-  BYTES_ASC("bytes-asc"), BYTES_DESC("bytes-desc"),
-  FILES_ASC("files-asc"), FILES_DESC("files-desc"), NONE("none");
+  BYTES_ASC("bytes-asc"),
+  BYTES_DESC("bytes-desc"),
+  FILES_ASC("files-asc"),
+  FILES_DESC("files-desc"),
+  NONE("none");
 
   private final String orderName;
 
@@ -49,7 +56,8 @@ public enum RewriteJobOrder {
 
   public static RewriteJobOrder fromName(String orderName) {
     Preconditions.checkArgument(orderName != null, "Rewrite job order name should not be null");
-    // Replace the hyphen in order name with underscore to map to the enum value. For example: bytes-asc to BYTES_ASC
+    // Replace the hyphen in order name with underscore to map to the enum value. For example:
+    // bytes-asc to BYTES_ASC
     return RewriteJobOrder.valueOf(orderName.replaceFirst("-", "_").toUpperCase(Locale.ENGLISH));
   }
 }
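
For example, fromName resolves a hyphenated property value to its enum
constant:

    import org.apache.iceberg.RewriteJobOrder;

    public class RewriteJobOrderExample {
      public static void main(String[] args) {
        // Maps "bytes-asc" to BYTES_ASC via the hyphen-to-underscore rule above.
        RewriteJobOrder order = RewriteJobOrder.fromName("bytes-asc");
        System.out.println(order); // BYTES_ASC
      }
    }
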
diff --git a/api/src/main/java/org/apache/iceberg/RewriteManifests.java b/api/src/main/java/org/apache/iceberg/RewriteManifests.java
index f6ca9e239b..ca823e94d2 100644
--- a/api/src/main/java/org/apache/iceberg/RewriteManifests.java
+++ b/api/src/main/java/org/apache/iceberg/RewriteManifests.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.function.Function;
@@ -24,16 +23,16 @@ import java.util.function.Predicate;
 
 /**
  * API for rewriting manifests for a table.
- * <p>
- * This API accumulates manifest files, produces a new {@link Snapshot} of the table
- * described only by the manifest files that were added, and commits that snapshot as the
- * current.
- * <p>
- * This API can be used to rewrite matching manifests according to a clustering function as well as
- * to replace specific manifests. Manifests that are deleted or added directly are ignored during
- * the rewrite process. The set of active files in replaced manifests must be the same as in new manifests.
- * <p>
- * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
+ *
+ * <p>This API accumulates manifest files, produces a new {@link Snapshot} of the table described
+ * only by the manifest files that were added, and commits that snapshot as the current.
+ *
+ * <p>This API can be used to rewrite matching manifests according to a clustering function as well
+ * as to replace specific manifests. Manifests that are deleted or added directly are ignored during
+ * the rewrite process. The set of active files in replaced manifests must be the same as in new
+ * manifests.
+ *
+ * <p>When committing, these changes will be applied to the latest table snapshot. Commit conflicts
  * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
  */
 public interface RewriteManifests extends SnapshotUpdate<RewriteManifests> {
@@ -51,11 +50,11 @@ public interface RewriteManifests extends SnapshotUpdate<RewriteManifests> {
 
   /**
    * Determines which existing {@link ManifestFile} for the table should be rewritten. Manifests
-   * that do not match the predicate are kept as-is. If this is not called and no predicate is set, then
-   * all manifests will be rewritten.
+   * that do not match the predicate are kept as-is. If this is not called and no predicate is set,
+   * then all manifests will be rewritten.
    *
-   * @param predicate Predicate used to determine which manifests to rewrite. If true then the manifest
-   *                  file will be included for rewrite. If false then then manifest is kept as-is.
+   * @param predicate Predicate used to determine which manifests to rewrite. If true then the
+   *     manifest file will be included for rewrite. If false then the manifest is kept as-is.
    * @return this for method chaining
    */
   RewriteManifests rewriteIf(Predicate<ManifestFile> predicate);
@@ -71,17 +70,17 @@ public interface RewriteManifests extends SnapshotUpdate<RewriteManifests> {
   /**
    * Adds a {@link ManifestFile manifest file} to the table. The added manifest cannot contain new
    * or deleted files.
-   * <p>
-   * By default, the manifest will be rewritten to ensure all entries have explicit snapshot IDs.
-   * In that case, it is always the responsibility of the caller to manage the lifecycle of
-   * the original manifest.
-   * <p>
-   * If manifest entries are allowed to inherit the snapshot ID assigned on commit, the manifest
+   *
+   * <p>By default, the manifest will be rewritten to ensure all entries have explicit snapshot IDs.
+   * In that case, it is always the responsibility of the caller to manage the lifecycle of the
+   * original manifest.
+   *
+   * <p>If manifest entries are allowed to inherit the snapshot ID assigned on commit, the manifest
    * should never be deleted manually if the commit succeeds as it will become part of the table
    * metadata and will be cleaned up on expiry. If the manifest gets merged with others while
-   * preparing a new snapshot, it will be deleted automatically if this operation is successful.
-   * If the commit fails, the manifest will never be deleted and it is up to the caller whether
-   * to delete or reuse it.
+   * preparing a new snapshot, it will be deleted automatically if this operation is successful. If
+   * the commit fails, the manifest will never be deleted and it is up to the caller whether to
+   * delete or reuse it.
    *
    * @param manifest a manifest to add
    * @return this for method chaining
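
A short sketch of rewriteIf; the 8 MB threshold is arbitrary and purely
illustrative:

    import org.apache.iceberg.Table;

    public class RewriteManifestsExample {
      static void compactManifests(Table table) {
        // Only manifests smaller than 8 MB are rewritten; larger ones are kept as-is.
        table
            .rewriteManifests()
            .rewriteIf(manifest -> manifest.length() < 8 * 1024 * 1024)
            .commit();
      }
    }
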
diff --git a/api/src/main/java/org/apache/iceberg/Rollback.java b/api/src/main/java/org/apache/iceberg/Rollback.java
index b733539436..48060beef3 100644
--- a/api/src/main/java/org/apache/iceberg/Rollback.java
+++ b/api/src/main/java/org/apache/iceberg/Rollback.java
@@ -16,18 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.exceptions.CommitFailedException;
 
 /**
  * API for rolling table data back to the state at an older table {@link Snapshot snapshot}.
- * <p>
- * This API does not allow conflicting calls to {@link #toSnapshotId(long)} and
- * {@link #toSnapshotAtTime(long)}.
- * <p>
- * When committing, these changes will be applied to the current table metadata. Commit conflicts
+ *
+ * <p>This API does not allow conflicting calls to {@link #toSnapshotId(long)} and {@link
+ * #toSnapshotAtTime(long)}.
+ *
+ * <p>When committing, these changes will be applied to the current table metadata. Commit conflicts
  * will not be resolved and will result in a {@link CommitFailedException}.
  */
 public interface Rollback extends PendingUpdate<Snapshot> {
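
A usage sketch, assuming the caller knows a good snapshot ID to return to
(table.rollback() is one way to obtain a Rollback):

    import org.apache.iceberg.Table;

    public class RollbackExample {
      static void rollBack(Table table, long goodSnapshotId) {
        // toSnapshotId and toSnapshotAtTime are mutually exclusive; conflicts are
        // not retried and surface as CommitFailedException.
        table.rollback().toSnapshotId(goodSnapshotId).commit();
      }
    }
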
diff --git a/api/src/main/java/org/apache/iceberg/RowDelta.java b/api/src/main/java/org/apache/iceberg/RowDelta.java
index dcf250aff1..b8a44a6020 100644
--- a/api/src/main/java/org/apache/iceberg/RowDelta.java
+++ b/api/src/main/java/org/apache/iceberg/RowDelta.java
@@ -16,18 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.expressions.Expression;
 
 /**
  * API for encoding row-level changes to a table.
- * <p>
- * This API accumulates data and delete file changes, produces a new {@link Snapshot} of the table, and commits
- * that snapshot as the current.
- * <p>
- * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
+ *
+ * <p>This API accumulates data and delete file changes, produces a new {@link Snapshot} of the
+ * table, and commits that snapshot as the current.
+ *
+ * <p>When committing, these changes will be applied to the latest table snapshot. Commit conflicts
  * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
  */
 public interface RowDelta extends SnapshotUpdate<RowDelta> {
@@ -49,9 +48,9 @@ public interface RowDelta extends SnapshotUpdate<RowDelta> {
 
   /**
    * Set the snapshot ID used in any reads for this operation.
-   * <p>
-   * Validations will check changes after this snapshot ID. If the from snapshot is not set, all ancestor snapshots
-   * through the table's initial snapshot are validated.
+   *
+   * <p>Validations will check changes after this snapshot ID. If the from snapshot is not set, all
+   * ancestor snapshots through the table's initial snapshot are validated.
    *
    * @param snapshotId a snapshot ID
    * @return this for method chaining
@@ -67,14 +66,15 @@ public interface RowDelta extends SnapshotUpdate<RowDelta> {
   RowDelta caseSensitive(boolean caseSensitive);
 
   /**
-   * Add data file paths that must not be removed by conflicting commits for this RowDelta to succeed.
-   * <p>
-   * If any path has been removed by a conflicting commit in the table since the snapshot passed to
-   * {@link #validateFromSnapshot(long)}, the operation will fail with a
-   * {@link org.apache.iceberg.exceptions.ValidationException}.
-   * <p>
-   * By default, this validation checks only rewrite and overwrite commits. To apply validation to delete commits, call
-   * {@link #validateDeletedFiles()}.
+   * Add data file paths that must not be removed by conflicting commits for this RowDelta to
+   * succeed.
+   *
+   * <p>If any path has been removed by a conflicting commit in the table since the snapshot passed
+   * to {@link #validateFromSnapshot(long)}, the operation will fail with a {@link
+   * org.apache.iceberg.exceptions.ValidationException}.
+   *
+   * <p>By default, this validation checks only rewrite and overwrite commits. To apply validation
+   * to delete commits, call {@link #validateDeletedFiles()}.
    *
    * @param referencedFiles file paths that are referenced by a position delete file
    * @return this for method chaining
@@ -82,35 +82,39 @@ public interface RowDelta extends SnapshotUpdate<RowDelta> {
   RowDelta validateDataFilesExist(Iterable<? extends CharSequence> referencedFiles);
 
   /**
-   * Enable validation that referenced data files passed to {@link #validateDataFilesExist(Iterable)} have not been
-   * removed by a delete operation.
-   * <p>
-   * If a data file has a row deleted using a position delete file, rewriting or overwriting the data file concurrently
-   * would un-delete the row. Deleting the data file is normally allowed, but a delete may be part of a transaction
-   * that reads and re-appends a row. This method is used to validate deletes for the transaction case.
+   * Enable validation that referenced data files passed to {@link
+   * #validateDataFilesExist(Iterable)} have not been removed by a delete operation.
+   *
+   * <p>If a data file has a row deleted using a position delete file, rewriting or overwriting the
+   * data file concurrently would un-delete the row. Deleting the data file is normally allowed, but
+   * a delete may be part of a transaction that reads and re-appends a row. This method is used to
+   * validate deletes for the transaction case.
    *
    * @return this for method chaining
    */
   RowDelta validateDeletedFiles();
 
   /**
-   * Enables validation that data files added concurrently do not conflict with this commit's operation.
-   * <p>
-   * This method should be called when the table is queried to determine which files to delete/append.
-   * If a concurrent operation commits a new file after the data was read and that file might
-   * contain rows matching the specified conflict detection filter, the overwrite operation
-   * will detect this during retries and fail.
-   * <p>
-   * Calling this method with a correct conflict detection filter is required to maintain
-   * serializable isolation for update/delete operations. Otherwise, the isolation level
-   * will be snapshot isolation.
-   * <p>
-   * Validation applies to files added to the table since the snapshot passed to {@link #validateFromSnapshot(long)}.
+   * Enables validation that data files added concurrently do not conflict with this commit's
+   * operation.
+   *
+   * <p>This method should be called when the table is queried to determine which files to
+   * delete/append. If a concurrent operation commits a new file after the data was read and that
+   * file might contain rows matching the specified conflict detection filter, the overwrite
+   * operation will detect this during retries and fail.
+   *
+   * <p>Calling this method with a correct conflict detection filter is required to maintain
+   * serializable isolation for update/delete operations. Otherwise, the isolation level will be
+   * snapshot isolation.
+   *
+   * <p>Validation applies to files added to the table since the snapshot passed to {@link
+   * #validateFromSnapshot(long)}.
    *
    * @param conflictDetectionFilter an expression on rows in the table
    * @return this for method chaining
-   * @deprecated since 0.13.0, will be removed in 0.14.0; use {@link #conflictDetectionFilter(Expression)} and
-   *             {@link #validateNoConflictingDataFiles()} instead.
+   * @deprecated since 0.13.0, will be removed in 0.14.0; use {@link
+   *     #conflictDetectionFilter(Expression)} and {@link #validateNoConflictingDataFiles()}
+   *     instead.
    */
   @Deprecated
   default RowDelta validateNoConflictingAppends(Expression conflictDetectionFilter) {
@@ -120,8 +124,8 @@ public interface RowDelta extends SnapshotUpdate<RowDelta> {
 
   /**
    * Sets a conflict detection filter used to validate concurrently added data and delete files.
-   * <p>
-   * If not called, a true literal will be used as the conflict detection filter.
+   *
+   * <p>If not called, a true literal will be used as the conflict detection filter.
    *
    * @param conflictDetectionFilter an expression on rows in the table
    * @return this for method chaining
@@ -129,32 +133,36 @@ public interface RowDelta extends SnapshotUpdate<RowDelta> {
   RowDelta conflictDetectionFilter(Expression conflictDetectionFilter);
 
   /**
-   * Enables validation that data files added concurrently do not conflict with this commit's operation.
-   * <p>
-   * This method should be called when the table is queried to determine which files to delete/append.
-   * If a concurrent operation commits a new file after the data was read and that file might
-   * contain rows matching the specified conflict detection filter, this operation
-   * will detect this during retries and fail.
-   * <p>
-   * Calling this method is required to maintain serializable isolation for update/delete operations.
-   * Otherwise, the isolation level will be snapshot isolation.
-   * <p>
-   * Validation uses the conflict detection filter passed to {@link #conflictDetectionFilter(Expression)} and
-   * applies to operations that happened after the snapshot passed to {@link #validateFromSnapshot(long)}.
+   * Enables validation that data files added concurrently do not conflict with this commit's
+   * operation.
+   *
+   * <p>This method should be called when the table is queried to determine which files to
+   * delete/append. If a concurrent operation commits a new file after the data was read and that
+   * file might contain rows matching the specified conflict detection filter, this operation will
+   * detect this during retries and fail.
+   *
+   * <p>Calling this method is required to maintain serializable isolation for update/delete
+   * operations. Otherwise, the isolation level will be snapshot isolation.
+   *
+   * <p>Validation uses the conflict detection filter passed to {@link
+   * #conflictDetectionFilter(Expression)} and applies to operations that happened after the
+   * snapshot passed to {@link #validateFromSnapshot(long)}.
    *
    * @return this for method chaining
    */
   RowDelta validateNoConflictingDataFiles();
 
   /**
-   * Enables validation that delete files added concurrently do not conflict with this commit's operation.
-   * <p>
-   * This method must be called when the table is queried to produce a row delta for UPDATE and
-   * MERGE operations independently of the isolation level. Calling this method isn't required
-   * for DELETE operations as it is OK to delete a record that is also deleted concurrently.
-   * <p>
-   * Validation uses the conflict detection filter passed to {@link #conflictDetectionFilter(Expression)} and
-   * applies to operations that happened after the snapshot passed to {@link #validateFromSnapshot(long)}.
+   * Enables validation that delete files added concurrently do not conflict with this commit's
+   * operation.
+   *
+   * <p>This method must be called when the table is queried to produce a row delta for UPDATE and
+   * MERGE operations independently of the isolation level. Calling this method isn't required for
+   * DELETE operations as it is OK to delete a record that is also deleted concurrently.
+   *
+   * <p>Validation uses the conflict detection filter passed to {@link
+   * #conflictDetectionFilter(Expression)} and applies to operations that happened after the
+   * snapshot passed to {@link #validateFromSnapshot(long)}.
    *
    * @return this for method chaining
    */
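
A MERGE-style sketch combining the validations documented above; the filter
column and all inputs are hypothetical:

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.DeleteFile;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.expressions.Expressions;

    public class RowDeltaExample {
      static void commitDelta(Table table, DataFile added, DeleteFile deletes, long readSnapshot) {
        table
            .newRowDelta()
            .addRows(added)
            .addDeletes(deletes)
            .validateFromSnapshot(readSnapshot) // validate changes after the read snapshot
            .conflictDetectionFilter(Expressions.equal("region", "eu"))
            .validateNoConflictingDataFiles() // needed for serializable isolation
            .validateNoConflictingDeleteFiles() // required for UPDATE/MERGE row deltas
            .commit();
      }
    }
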
diff --git a/api/src/main/java/org/apache/iceberg/Scan.java b/api/src/main/java/org/apache/iceberg/Scan.java
index 72e0056490..118b9ce66b 100644
--- a/api/src/main/java/org/apache/iceberg/Scan.java
+++ b/api/src/main/java/org/apache/iceberg/Scan.java
@@ -16,8 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-
 package org.apache.iceberg;
 
 import java.util.Collection;
@@ -26,8 +24,8 @@ import org.apache.iceberg.expressions.Expression;
 import org.apache.iceberg.io.CloseableIterable;
 
 /**
- * Scan objects are immutable and can be shared between threads. Refinement methods, like
- * {@link #select(Collection)} and {@link #filter(Expression)}, create new TableScan instances.
+ * Scan objects are immutable and can be shared between threads. Refinement methods, like {@link
+ * #select(Collection)} and {@link #filter(Expression)}, create new TableScan instances.
  *
  * @param <ThisT> the child Java API class, returned by method chaining
  * @param <T> the Java type of tasks produced by this scan
@@ -35,8 +33,8 @@ import org.apache.iceberg.io.CloseableIterable;
  */
 public interface Scan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> {
   /**
-   * Create a new scan from this scan's configuration that will override the {@link Table}'s behavior based
-   * on the incoming pair. Unknown properties will be ignored.
+   * Create a new scan from this scan's configuration that will override the {@link Table}'s
+   * behavior based on the given property/value pair. Unknown properties will be ignored.
    *
    * @param property name of the table property to be overridden
    * @param value value to override with
@@ -53,9 +51,9 @@ public interface Scan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> {
   ThisT project(Schema schema);
 
   /**
-   * Create a new scan from this that, if data columns where selected
-   * via {@link #select(java.util.Collection)}, controls whether the match to the schema will be done
-   * with case sensitivity. Default is true.
+   * Create a new scan from this that, if data columns were selected via {@link
+   * #select(java.util.Collection)}, controls whether the match to the schema will be done with case
+   * sensitivity. Default is true.
    *
    * @return a new scan based on this with case sensitivity as stated
    */
@@ -63,17 +61,17 @@ public interface Scan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> {
 
   /**
    * Create a new scan from this that loads the column stats with each data file.
-   * <p>
-   * Column stats include: value count, null value count, lower bounds, and upper bounds.
+   *
+   * <p>Column stats include: value count, null value count, lower bounds, and upper bounds.
    *
    * @return a new scan based on this that loads column stats.
    */
   ThisT includeColumnStats();
 
   /**
-   * Create a new scan from this that will read the given data columns. This produces
-   * an expected schema that includes all fields that are either selected or used by this scan's
-   * filter expression.
+   * Create a new scan from this that will read the given data columns. This produces an expected
+   * schema that includes all fields that are either selected or used by this scan's filter
+   * expression.
    *
    * @param columns column names from the table's schema
    * @return a new scan based on this with the given projection columns
@@ -89,15 +87,16 @@ public interface Scan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> {
   ThisT filter(Expression expr);
 
   /**
-   * Create a new scan from this that applies data filtering to files but not to rows in those files.
+   * Create a new scan from this that applies data filtering to files but not to rows in those
+   * files.
    *
    * @return a new scan based on this that does not filter rows in files.
    */
   ThisT ignoreResiduals();
 
   /**
-   * Create a new scan to use a particular executor to plan. The default worker pool will be
-   * used by default.
+   * Create a new scan to use a particular executor to plan. If this is not called, the default
+   * worker pool will be used.
    *
    * @param executorService the provided executor
    * @return a table scan that uses the provided executor to access manifests
@@ -106,11 +105,13 @@ public interface Scan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> {
 
   /**
    * Returns this scan's projection {@link Schema}.
-   * <p>
-   * If the projection schema was set directly using {@link #project(Schema)}, returns that schema.
-   * <p>
-   * If the projection schema was set by calling {@link #select(Collection)}, returns a projection
-   * schema that includes the selected data fields and any fields used in the filter expression.
+   *
+   * <p>If the projection schema was set directly using {@link #project(Schema)}, returns that
+   * schema.
+   *
+   * <p>If the projection schema was set by calling {@link #select(Collection)}, returns a
+   * projection schema that includes the selected data fields and any fields used in the filter
+   * expression.
    *
    * @return this scan's projection schema
    */
@@ -118,9 +119,9 @@ public interface Scan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> {
 
   /**
    * Plan tasks for this scan where each task reads a single file.
-   * <p>
-   * Use {@link #planTasks()} for planning balanced tasks where each task will read either a single file,
-   * a part of a file, or multiple files.
+   *
+   * <p>Use {@link #planTasks()} for planning balanced tasks where each task will read either a
+   * single file, a part of a file, or multiple files.
    *
    * @return an Iterable of tasks scanning entire files required by this scan
    */
@@ -128,25 +129,20 @@ public interface Scan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>> {
 
   /**
    * Plan balanced task groups for this scan by splitting large and combining small tasks.
-   * <p>
-   * Task groups created by this method may read partial input files, multiple input files or both.
+   *
+   * <p>Task groups created by this method may read partial input files, multiple input files or
+   * both.
    *
    * @return an Iterable of balanced task groups required by this scan
    */
   CloseableIterable<G> planTasks();
 
-  /**
-   * Returns the target split size for this scan.
-   */
+  /** Returns the target split size for this scan. */
   long targetSplitSize();
 
-  /**
-   * Returns the split lookback for this scan.
-   */
+  /** Returns the split lookback for this scan. */
   int splitLookback();
 
-  /**
-   * Returns the split open file cost for this scan.
-   */
+  /** Returns the split open file cost for this scan. */
   long splitOpenFileCost();
 }
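
A planning sketch over the refinement methods above; the column names and
filter are hypothetical:

    import org.apache.iceberg.FileScanTask;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.expressions.Expressions;
    import org.apache.iceberg.io.CloseableIterable;

    public class ScanExample {
      static void plan(Table table) throws Exception {
        // Each refinement returns a new immutable scan; planFiles() yields one
        // task per input file required by the scan.
        try (CloseableIterable<FileScanTask> tasks =
            table
                .newScan()
                .select("id", "data")
                .filter(Expressions.greaterThan("id", 100))
                .caseSensitive(false)
                .planFiles()) {
          tasks.forEach(task -> System.out.println(task.file().path()));
        }
      }
    }
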
diff --git a/api/src/main/java/org/apache/iceberg/ScanTask.java b/api/src/main/java/org/apache/iceberg/ScanTask.java
index 1b202f506a..3468f5e980 100644
--- a/api/src/main/java/org/apache/iceberg/ScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/ScanTask.java
@@ -16,14 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
 
-/**
- * A scan task.
- */
+/** A scan task. */
 public interface ScanTask extends Serializable {
   /**
    * The number of bytes that should be read by this scan task.
@@ -43,9 +40,7 @@ public interface ScanTask extends Serializable {
     return 1;
   }
 
-  /**
-   * Returns true if this is a {@link FileScanTask}, false otherwise.
-   */
+  /** Returns true if this is a {@link FileScanTask}, false otherwise. */
   default boolean isFileScanTask() {
     return false;
   }
@@ -60,9 +55,7 @@ public interface ScanTask extends Serializable {
     throw new IllegalStateException("Not a FileScanTask: " + this);
   }
 
-  /**
-   * Returns true if this is a {@link DataTask}, false otherwise.
-   */
+  /** Returns true if this is a {@link DataTask}, false otherwise. */
   default boolean isDataTask() {
     return false;
   }
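
The isFileScanTask()/asFileScanTask() pair above acts as a checked downcast,
for instance:

    import org.apache.iceberg.FileScanTask;
    import org.apache.iceberg.ScanTask;

    public class ScanTaskExample {
      static long bytesToRead(ScanTask task) {
        if (task.isFileScanTask()) {
          // Safe: asFileScanTask() only throws when isFileScanTask() is false.
          FileScanTask fileTask = task.asFileScanTask();
          return fileTask.length();
        }
        return task.sizeBytes();
      }
    }
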
diff --git a/api/src/main/java/org/apache/iceberg/ScanTaskGroup.java b/api/src/main/java/org/apache/iceberg/ScanTaskGroup.java
index 71a2d3fa43..4aabc1a064 100644
--- a/api/src/main/java/org/apache/iceberg/ScanTaskGroup.java
+++ b/api/src/main/java/org/apache/iceberg/ScanTaskGroup.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Collection;
@@ -27,9 +26,7 @@ import java.util.Collection;
  * @param <T> the type of scan tasks
  */
 public interface ScanTaskGroup<T extends ScanTask> extends ScanTask {
-  /**
-   * Returns scan tasks in this group.
-   */
+  /** Returns scan tasks in this group. */
   Collection<T> tasks();
 
   @Override
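
Since a group is itself a ScanTask, a natural aggregate simply sums over
tasks(), e.g.:

    import org.apache.iceberg.ScanTask;
    import org.apache.iceberg.ScanTaskGroup;

    public class ScanTaskGroupExample {
      static <T extends ScanTask> long totalBytes(ScanTaskGroup<T> group) {
        // Sum of sizeBytes over every task in the group.
        return group.tasks().stream().mapToLong(ScanTask::sizeBytes).sum();
      }
    }
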
diff --git a/api/src/main/java/org/apache/iceberg/Schema.java b/api/src/main/java/org/apache/iceberg/Schema.java
index 987046a27b..34105a00ad 100644
--- a/api/src/main/java/org/apache/iceberg/Schema.java
+++ b/api/src/main/java/org/apache/iceberg/Schema.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
@@ -45,9 +44,9 @@ import org.apache.iceberg.types.Types.StructType;
 
 /**
  * The schema of a data table.
- * <p>
- * Schema ID will only be populated when reading from/writing to table metadata,
- * otherwise it will be default to 0.
+ *
+ * <p>Schema ID will only be populated when reading from/writing to table metadata, otherwise it
+ * will default to 0.
  */
 public class Schema implements Serializable {
   private static final Joiner NEWLINE = Joiner.on('\n');
@@ -71,7 +70,8 @@ public class Schema implements Serializable {
     this(columns, aliases, ImmutableSet.of());
   }
 
-  public Schema(List<NestedField> columns, Map<String, Integer> aliases, Set<Integer> identifierFieldIds) {
+  public Schema(
+      List<NestedField> columns, Map<String, Integer> aliases, Set<Integer> identifierFieldIds) {
     this(DEFAULT_SCHEMA_ID, columns, aliases, identifierFieldIds);
   }
 
@@ -91,8 +91,11 @@ public class Schema implements Serializable {
     this(schemaId, columns, null, identifierFieldIds);
   }
 
-  public Schema(int schemaId, List<NestedField> columns, Map<String, Integer> aliases,
-                Set<Integer> identifierFieldIds) {
+  public Schema(
+      int schemaId,
+      List<NestedField> columns,
+      Map<String, Integer> aliases,
+      Set<Integer> identifierFieldIds) {
     this.schemaId = schemaId;
     this.struct = StructType.of(columns);
     this.aliasToId = aliases != null ? ImmutableBiMap.copyOf(aliases) : null;
@@ -103,23 +106,31 @@ public class Schema implements Serializable {
       identifierFieldIds.forEach(id -> validateIdentifierField(id, lazyIdToField(), idToParent));
     }
 
-    this.identifierFieldIds = identifierFieldIds != null ? Ints.toArray(identifierFieldIds) : new int[0];
+    this.identifierFieldIds =
+        identifierFieldIds != null ? Ints.toArray(identifierFieldIds) : new int[0];
 
     this.highestFieldId = lazyIdToName().keySet().stream().mapToInt(i -> i).max().orElse(0);
   }
 
-  static void validateIdentifierField(int fieldId, Map<Integer, Types.NestedField> idToField,
-                                              Map<Integer, Integer> idToParent) {
+  static void validateIdentifierField(
+      int fieldId, Map<Integer, Types.NestedField> idToField, Map<Integer, Integer> idToParent) {
     Types.NestedField field = idToField.get(fieldId);
-    Preconditions.checkArgument(field != null,
-        "Cannot add fieldId %s as an identifier field: field does not exist", fieldId);
-    Preconditions.checkArgument(field.type().isPrimitiveType(),
-        "Cannot add field %s as an identifier field: not a primitive type field", field.name());
-    Preconditions.checkArgument(field.isRequired(),
-        "Cannot add field %s as an identifier field: not a required field", field.name());
-    Preconditions.checkArgument(!Types.DoubleType.get().equals(field.type()) &&
-            !Types.FloatType.get().equals(field.type()),
-        "Cannot add field %s as an identifier field: must not be float or double field", field.name());
+    Preconditions.checkArgument(
+        field != null,
+        "Cannot add fieldId %s as an identifier field: field does not exist",
+        fieldId);
+    Preconditions.checkArgument(
+        field.type().isPrimitiveType(),
+        "Cannot add field %s as an identifier field: not a primitive type field",
+        field.name());
+    Preconditions.checkArgument(
+        field.isRequired(),
+        "Cannot add field %s as an identifier field: not a required field",
+        field.name());
+    Preconditions.checkArgument(
+        !Types.DoubleType.get().equals(field.type()) && !Types.FloatType.get().equals(field.type()),
+        "Cannot add field %s as an identifier field: must not be float or double field",
+        field.name());
 
     // check whether the nested field is in a chain of required struct fields
     // exploring from root for better error message for list and map types
@@ -132,11 +143,16 @@ public class Schema implements Serializable {
 
     while (!deque.isEmpty()) {
       Types.NestedField parent = idToField.get(deque.pop());
-      Preconditions.checkArgument(parent.type().isStructType(),
-          "Cannot add field %s as an identifier field: must not be nested in %s", field.name(), parent);
-      Preconditions.checkArgument(parent.isRequired(),
+      Preconditions.checkArgument(
+          parent.type().isStructType(),
+          "Cannot add field %s as an identifier field: must not be nested in %s",
+          field.name(),
+          parent);
+      Preconditions.checkArgument(
+          parent.isRequired(),
           "Cannot add field %s as an identifier field: must not be nested in an optional field %s",
-          field.name(), parent);
+          field.name(),
+          parent);
     }
   }
 
@@ -192,25 +208,23 @@ public class Schema implements Serializable {
 
   /**
    * Returns the schema ID for this schema.
-   * <p>
-   * Note that schema ID will only be populated when reading from/writing to table metadata,
+   *
+   * <p>Note that schema ID will only be populated when reading from/writing to table metadata,
    * otherwise it will default to 0.
    */
   public int schemaId() {
     return this.schemaId;
   }
 
-  /**
-   * Returns the highest field ID in this schema, including nested fields.
-   */
+  /** Returns the highest field ID in this schema, including nested fields. */
   public int highestFieldId() {
     return highestFieldId;
   }
 
   /**
    * Returns an alias map for this schema, if set.
-   * <p>
-   * Alias maps are created when translating an external schema, like an Avro Schema, to this
+   *
+   * <p>Alias maps are created when translating an external schema, like an Avro Schema, to this
    * format. The original column names can be provided in a Map when constructing this Schema.
    *
    * @return a Map of column aliases to field ids
@@ -228,28 +242,28 @@ public class Schema implements Serializable {
     return struct;
   }
 
-  /**
-   * Returns a List of the {@link NestedField columns} in this Schema.
-   */
+  /** Returns a List of the {@link NestedField columns} in this Schema. */
   public List<NestedField> columns() {
     return struct.fields();
   }
 
   /**
    * The set of identifier field IDs.
-   * <p>
-   * Identifier is a concept similar to primary key in a relational database system.
-   * It consists of a unique set of primitive fields in the schema.
-   * An identifier field must be at root, or nested in a chain of structs (no maps or lists).
-   * A row should be unique in a table based on the values of the identifier fields.
-   * Optional, float and double columns cannot be used as identifier fields.
-   * However, Iceberg identifier differs from primary key in the following ways:
+   *
+   * <p>Identifier is a concept similar to primary key in a relational database system. It consists
+   * of a unique set of primitive fields in the schema. An identifier field must be at root, or
+   * nested in a chain of structs (no maps or lists). A row should be unique in a table based on the
+   * values of the identifier fields. Optional, float and double columns cannot be used as
+   * identifier fields. However, an Iceberg identifier differs from a primary key in the following ways:
+   *
    * <ul>
-   * <li>Iceberg does not enforce the uniqueness of a row based on this identifier information.
-   * It is used for operations like upsert to define the default upsert key.</li>
-   * <li>A nested field in a struct can be used as an identifier. For example, if there is a "last_name" field
-   * inside a "user" struct in a schema, field "user.last_name" can be set as a part of the identifier field.</li>
+   *   <li>Iceberg does not enforce the uniqueness of a row based on this identifier information. It
+   *       is used for operations like upsert to define the default upsert key.
+   *   <li>A nested field in a struct can be used as an identifier. For example, if there is a
+   *       "last_name" field inside a "user" struct in a schema, field "user.last_name" can be set
+   *       as a part of the identifier field.
    * </ul>
+   *
    * <p>
    *
    * @return the set of identifier field IDs in this schema.
@@ -258,14 +272,11 @@ public class Schema implements Serializable {
     return lazyIdentifierFieldIdSet();
   }
 
-  /**
-   * Returns the set of identifier field names.
-   */
+  /** Returns the set of identifier field names. */
   public Set<String> identifierFieldNames() {
-    return identifierFieldIds()
-            .stream()
-            .map(id -> lazyIdToName().get(id))
-            .collect(Collectors.toSet());
+    return identifierFieldIds().stream()
+        .map(id -> lazyIdToName().get(id))
+        .collect(Collectors.toSet());
   }
 
   /**
@@ -277,7 +288,7 @@ public class Schema implements Serializable {
   public Type findType(String name) {
     Preconditions.checkArgument(!name.isEmpty(), "Invalid column name: (empty)");
     Integer id = lazyNameToId().get(name);
-    if (id != null) {  // name is found
+    if (id != null) { // name is found
       return findType(id);
     }
 
@@ -311,8 +322,8 @@ public class Schema implements Serializable {
 
   /**
    * Returns a sub-field by name as a {@link NestedField}.
-   * <p>
-   * The result may be a top-level or a nested field.
+   *
+   * <p>The result may be a top-level or a nested field.
    *
    * @param name a String name
    * @return a Type for the sub-field or null if it is not found
@@ -328,8 +339,8 @@ public class Schema implements Serializable {
 
   /**
    * Returns a sub-field by name as a {@link NestedField}.
-   * <p>
-   * The result may be a top-level or a nested field.
+   *
+   * <p>The result may be a top-level or a nested field.
    *
    * @param name a String name
    * @return the sub-field or null if it is not found
@@ -354,8 +365,8 @@ public class Schema implements Serializable {
   }
 
   /**
-   * Returns the column id for the given column alias. Column aliases are set
-   * by conversions from Parquet or Avro to this Schema type.
+   * Returns the column id for the given column alias. Column aliases are set by conversions from
+   * Parquet or Avro to this Schema type.
    *
    * @param alias a full column name in the unconverted data schema
    * @return the column id in this schema, or null if the column wasn't found
@@ -368,8 +379,8 @@ public class Schema implements Serializable {
   }
 
   /**
-   * Returns the full column name in the unconverted data schema for the given column id.
-   * Column aliases are set by conversions from Parquet or Avro to this Schema type.
+   * Returns the full column name in the unconverted data schema for the given column id. Column
+   * aliases are set by conversions from Parquet or Avro to this Schema type.
    *
    * @param fieldId a column id in this schema
    * @return the full column name in the unconverted data schema, or null if one wasn't found
@@ -383,8 +394,8 @@ public class Schema implements Serializable {
 
   /**
    * Returns an accessor for retrieving the data from {@link StructLike}.
-   * <p>
-   * Accessors do not retrieve data contained in lists or maps.
+   *
+   * <p>Accessors do not retrieve data contained in lists or maps.
    *
    * @param id a column id in this schema
    * @return an {@link Accessor} to retrieve values from a {@link StructLike} row
@@ -395,8 +406,8 @@ public class Schema implements Serializable {
 
   /**
    * Creates a projection schema for a subset of columns, selected by name.
-   * <p>
-   * Names that identify nested fields will select part or all of the field's top-level column.
+   *
+   * <p>Names that identify nested fields will select part or all of the field's top-level column.
    *
    * @param names String names for selected columns
    * @return a projection schema from this schema, by name
@@ -407,8 +418,8 @@ public class Schema implements Serializable {
 
   /**
    * Creates a projection schema for a subset of columns, selected by name.
-   * <p>
-   * Names that identify nested fields will select part or all of the field's top-level column.
+   *
+   * <p>Names that identify nested fields will select part or all of the field's top-level column.
    *
    * @param names a List of String names for selected columns
    * @return a projection schema from this schema, by name
@@ -419,8 +430,8 @@ public class Schema implements Serializable {
 
   /**
    * Creates a projection schema for a subset of columns, selected by case-insensitive names.
-   * <p>
-   * Names that identify nested fields will select part or all of the field's top-level column.
+   *
+   * <p>Names that identify nested fields will select part or all of the field's top-level column.
    *
    * @param names a List of String names for selected columns
    * @return a projection schema from this schema, by names
@@ -431,8 +442,8 @@ public class Schema implements Serializable {
 
   /**
    * Creates a projection schema for a subset of columns, selected by case-insensitive names.
-   * <p>
-   * Names that identify nested fields will select part or all of the field's top-level column.
+   *
+   * <p>Names that identify nested fields will select part or all of the field's top-level column.
    *
    * @param names a List of String names for selected columns
    * @return a projection schema from this schema, by names
@@ -443,12 +454,13 @@ public class Schema implements Serializable {
 
   /**
    * Checks whether this schema is equivalent to another schema while ignoring the schema ID.
+   *
    * @param anotherSchema another schema
    * @return true if this schema is equivalent to the given schema
    */
   public boolean sameSchema(Schema anotherSchema) {
-    return asStruct().equals(anotherSchema.asStruct()) &&
-        identifierFieldIds().equals(anotherSchema.identifierFieldIds());
+    return asStruct().equals(anotherSchema.asStruct())
+        && identifierFieldIds().equals(anotherSchema.identifierFieldIds());
   }
 
   private Schema internalSelect(Collection<String> names, boolean caseSensitive) {
@@ -479,9 +491,11 @@ public class Schema implements Serializable {
 
   @Override
   public String toString() {
-    return String.format("table {\n%s\n}",
-        NEWLINE.join(struct.fields().stream()
-            .map(this::identifierFieldToString)
-            .collect(Collectors.toList())));
+    return String.format(
+        "table {\n%s\n}",
+        NEWLINE.join(
+            struct.fields().stream()
+                .map(this::identifierFieldToString)
+                .collect(Collectors.toList())));
   }
 }
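
A sketch of the identifier-field constructor and name-based projection
reformatted above; the columns themselves are hypothetical:

    import java.util.Arrays;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
    import org.apache.iceberg.types.Types;

    public class SchemaExample {
      public static void main(String[] args) {
        // Field id 1 ("id") is required and primitive, so it passes
        // validateIdentifierField and can serve as the identifier field.
        Schema schema =
            new Schema(
                Arrays.asList(
                    Types.NestedField.required(1, "id", Types.LongType.get()),
                    Types.NestedField.optional(2, "data", Types.StringType.get())),
                null, // no aliases
                ImmutableSet.of(1));

        System.out.println(schema.identifierFieldNames()); // [id]
        System.out.println(schema.select("id")); // projection by name
      }
    }
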
diff --git a/api/src/main/java/org/apache/iceberg/Snapshot.java b/api/src/main/java/org/apache/iceberg/Snapshot.java
index cfaa7f9b24..e998fbc4b6 100644
--- a/api/src/main/java/org/apache/iceberg/Snapshot.java
+++ b/api/src/main/java/org/apache/iceberg/Snapshot.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
@@ -26,17 +25,17 @@ import org.apache.iceberg.io.FileIO;
 
 /**
  * A snapshot of the data in a table at a point in time.
- * <p>
- * A snapshot consist of one or more file manifests, and the complete table contents is the union
+ *
+ * <p>A snapshot consists of one or more file manifests, and the complete table contents are the union
  * of all the data files in those manifests.
- * <p>
- * Snapshots are created by table operations, like {@link AppendFiles} and {@link RewriteFiles}.
+ *
+ * <p>Snapshots are created by table operations, like {@link AppendFiles} and {@link RewriteFiles}.
  */
 public interface Snapshot extends Serializable {
   /**
    * Return this snapshot's sequence number.
-   * <p>
-   * Sequence numbers are assigned when a snapshot is committed.
+   *
+   * <p>Sequence numbers are assigned when a snapshot is committed.
    *
    * @return a long sequence number
    */
@@ -58,8 +57,8 @@ public interface Snapshot extends Serializable {
 
   /**
    * Return this snapshot's timestamp.
-   * <p>
-   * This timestamp is the same as those produced by {@link System#currentTimeMillis()}.
+   *
+   * <p>This timestamp is the same as those produced by {@link System#currentTimeMillis()}.
    *
    * @return a long timestamp in milliseconds
    */
@@ -69,7 +68,8 @@ public interface Snapshot extends Serializable {
    * Return all {@link ManifestFile} instances for either data or delete manifests in this snapshot.
    *
    * @return a list of ManifestFile
-   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#allManifests(FileIO)} instead.
+   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#allManifests(FileIO)}
+   *     instead.
    */
   @Deprecated
   List<ManifestFile> allManifests();
@@ -86,7 +86,8 @@ public interface Snapshot extends Serializable {
    * Return a {@link ManifestFile} for each data manifest in this snapshot.
    *
    * @return a list of ManifestFile
-   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#dataManifests(FileIO)} instead.
+   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#dataManifests(FileIO)}
+   *     instead.
    */
   @Deprecated
   List<ManifestFile> dataManifests();
@@ -103,7 +104,8 @@ public interface Snapshot extends Serializable {
    * Return a {@link ManifestFile} for each delete manifest in this snapshot.
    *
    * @return a list of ManifestFile
-   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#deleteManifests(FileIO)} instead.
+   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link
+   *     Snapshot#deleteManifests(FileIO)} instead.
    */
   @Deprecated
   List<ManifestFile> deleteManifests();
@@ -133,20 +135,21 @@ public interface Snapshot extends Serializable {
 
   /**
    * Return all data files added to the table in this snapshot.
-   * <p>
-   * The files returned include the following columns: file_path, file_format, partition,
+   *
+   * <p>The files returned include the following columns: file_path, file_format, partition,
    * record_count, and file_size_in_bytes. Other columns will be null.
    *
    * @return all data files added to the table in this snapshot.
-   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#addedDataFiles(FileIO)} instead.
+   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#addedDataFiles(FileIO)}
+   *     instead.
    */
   @Deprecated
   Iterable<DataFile> addedFiles();
 
   /**
    * Return all data files added to the table in this snapshot.
-   * <p>
-   * The files returned include the following columns: file_path, file_format, partition,
+   *
+   * <p>The files returned include the following columns: file_path, file_format, partition,
    * record_count, and file_size_in_bytes. Other columns will be null.
    *
    * @param io a {@link FileIO} instance used for reading files from storage
@@ -156,20 +159,21 @@ public interface Snapshot extends Serializable {
 
   /**
    * Return all data files deleted from the table in this snapshot.
-   * <p>
-   * The files returned include the following columns: file_path, file_format, partition,
+   *
+   * <p>The files returned include the following columns: file_path, file_format, partition,
    * record_count, and file_size_in_bytes. Other columns will be null.
    *
    * @return all data files deleted from the table in this snapshot.
-   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link Snapshot#removedDataFiles(FileIO)} instead.
+   * @deprecated since 0.14.0, will be removed in 1.0.0; Use {@link
+   *     Snapshot#removedDataFiles(FileIO)} instead.
    */
   @Deprecated
   Iterable<DataFile> deletedFiles();
 
   /**
    * Return all data files removed from the table in this snapshot.
-   * <p>
-   * The files returned include the following columns: file_path, file_format, partition,
+   *
+   * <p>The files returned include the following columns: file_path, file_format, partition,
    * record_count, and file_size_in_bytes. Other columns will be null.
    *
    * @param io a {@link FileIO} instance used for reading files from storage
@@ -179,28 +183,30 @@ public interface Snapshot extends Serializable {
 
   /**
    * Return all delete files added to the table in this snapshot.
-   * <p>
-   * The files returned include the following columns: file_path, file_format, partition,
+   *
+   * <p>The files returned include the following columns: file_path, file_format, partition,
    * record_count, and file_size_in_bytes. Other columns will be null.
    *
    * @param io a {@link FileIO} instance used for reading files from storage
    * @return all delete files added to the table in this snapshot
    */
   default Iterable<DeleteFile> addedDeleteFiles(FileIO io) {
-    throw new UnsupportedOperationException(this.getClass().getName() + " doesn't implement addedDeleteFiles");
+    throw new UnsupportedOperationException(
+        this.getClass().getName() + " doesn't implement addedDeleteFiles");
   }
 
   /**
    * Return all delete files removed from the table in this snapshot.
-   * <p>
-   * The files returned include the following columns: file_path, file_format, partition,
+   *
+   * <p>The files returned include the following columns: file_path, file_format, partition,
    * record_count, and file_size_in_bytes. Other columns will be null.
    *
    * @param io a {@link FileIO} instance used for reading files from storage
    * @return all delete files removed from the table in this snapshot
    */
   default Iterable<DeleteFile> removedDeleteFiles(FileIO io) {
-    throw new UnsupportedOperationException(this.getClass().getName() + " doesn't implement removedDeleteFiles");
+    throw new UnsupportedOperationException(
+        this.getClass().getName() + " doesn't implement removedDeleteFiles");
   }
 
   /**
@@ -211,7 +217,8 @@ public interface Snapshot extends Serializable {
   String manifestListLocation();
 
   /**
-   * Return the id of the schema used when this snapshot was created, or null if this information is not available.
+   * Return the id of the schema used when this snapshot was created, or null if this information is
+   * not available.
    *
    * @return schema id associated with this snapshot
    */
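
For context, a short sketch of the Snapshot accessors whose Javadoc is rewrapped above; it assumes a Table with at least one snapshot is in scope and uses the FileIO-taking variants that replace the deprecated no-arg methods:

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.ManifestFile;
    import org.apache.iceberg.Snapshot;
    import org.apache.iceberg.Table;

    public class SnapshotExample {
      // Prints the manifests and newly added data files of the current snapshot.
      static void describe(Table table) {
        Snapshot snapshot = table.currentSnapshot(); // assumes the table is non-empty
        for (ManifestFile manifest : snapshot.allManifests(table.io())) {
          System.out.println("manifest: " + manifest.path());
        }
        for (DataFile file : snapshot.addedDataFiles(table.io())) {
          System.out.println("added: " + file.path() + " (" + file.recordCount() + " rows)");
        }
      }
    }
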
diff --git a/api/src/main/java/org/apache/iceberg/SnapshotRef.java b/api/src/main/java/org/apache/iceberg/SnapshotRef.java
index 59ba741511..917281a9d2 100644
--- a/api/src/main/java/org/apache/iceberg/SnapshotRef.java
+++ b/api/src/main/java/org/apache/iceberg/SnapshotRef.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
@@ -86,11 +85,11 @@ public class SnapshotRef implements Serializable {
     }
 
     SnapshotRef ref = (SnapshotRef) other;
-    return ref.snapshotId == snapshotId &&
-        Objects.equals(ref.type(), type) &&
-        Objects.equals(ref.maxRefAgeMs(), maxRefAgeMs) &&
-        Objects.equals(ref.minSnapshotsToKeep(), minSnapshotsToKeep) &&
-        Objects.equals(ref.maxSnapshotAgeMs(), maxSnapshotAgeMs);
+    return ref.snapshotId == snapshotId
+        && Objects.equals(ref.type(), type)
+        && Objects.equals(ref.maxRefAgeMs(), maxRefAgeMs)
+        && Objects.equals(ref.minSnapshotsToKeep(), minSnapshotsToKeep)
+        && Objects.equals(ref.maxSnapshotAgeMs(), maxSnapshotAgeMs);
   }
 
   @Override
@@ -100,8 +99,7 @@ public class SnapshotRef implements Serializable {
         this.type,
         this.maxRefAgeMs,
         this.maxSnapshotAgeMs,
-        this.minSnapshotsToKeep
-    );
+        this.minSnapshotsToKeep);
   }
 
   public static Builder tagBuilder(long snapshotId) {
@@ -120,11 +118,13 @@ public class SnapshotRef implements Serializable {
   }
 
   /**
-   * Creates a ref builder from the given ref and its properties but the ref will now point to the given snapshotId.
+   * Creates a ref builder from the given ref and its properties, but the ref will now point to
+   * the given snapshotId.
    *
    * @param ref Ref to build from
    * @param snapshotId snapshotID to use.
-   * @return ref builder with the same retention properties as given ref, but the ref will point to the passed in id
+   * @return ref builder with the same retention properties as given ref, but the ref will point to
+   *     the passed in id
    */
   public static Builder builderFrom(SnapshotRef ref, long snapshotId) {
     return new Builder(ref.type(), snapshotId)
@@ -152,25 +152,28 @@ public class SnapshotRef implements Serializable {
     }
 
     public Builder minSnapshotsToKeep(Integer value) {
-      Preconditions.checkArgument(value == null || !type.equals(SnapshotRefType.TAG),
+      Preconditions.checkArgument(
+          value == null || !type.equals(SnapshotRefType.TAG),
           "Tags do not support setting minSnapshotsToKeep");
-      Preconditions.checkArgument(value == null || value > 0,
-          "Min snapshots to keep must be greater than 0");
+      Preconditions.checkArgument(
+          value == null || value > 0, "Min snapshots to keep must be greater than 0");
       this.minSnapshotsToKeep = value;
       return this;
     }
 
     public Builder maxSnapshotAgeMs(Long value) {
-      Preconditions.checkArgument(value == null || !type.equals(SnapshotRefType.TAG),
+      Preconditions.checkArgument(
+          value == null || !type.equals(SnapshotRefType.TAG),
           "Tags do not support setting maxSnapshotAgeMs");
-      Preconditions.checkArgument(value == null || value > 0,
-          "Max snapshot age must be greater than 0 ms");
+      Preconditions.checkArgument(
+          value == null || value > 0, "Max snapshot age must be greater than 0 ms");
       this.maxSnapshotAgeMs = value;
       return this;
     }
 
     public Builder maxRefAgeMs(Long value) {
-      Preconditions.checkArgument(value == null || value > 0, "Max reference age must be greater than 0");
+      Preconditions.checkArgument(
+          value == null || value > 0, "Max reference age must be greater than 0");
       this.maxRefAgeMs = value;
       return this;
     }
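
A hedged sketch of the SnapshotRef builders touched above; the snapshot id and retention values are arbitrary placeholders:

    import org.apache.iceberg.SnapshotRef;

    public class SnapshotRefExample {
      public static void main(String[] args) {
        long snapshotId = 1234L; // placeholder id

        // branches support snapshot retention settings
        SnapshotRef branch =
            SnapshotRef.branchBuilder(snapshotId)
                .minSnapshotsToKeep(10)
                .maxSnapshotAgeMs(7L * 24 * 60 * 60 * 1000) // one week
                .build();

        // tags only support a max reference age; minSnapshotsToKeep would throw
        SnapshotRef tag =
            SnapshotRef.tagBuilder(snapshotId)
                .maxRefAgeMs(24L * 60 * 60 * 1000) // one day
                .build();

        System.out.println(branch.snapshotId() == tag.snapshotId()); // true
      }
    }
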
diff --git a/api/src/main/java/org/apache/iceberg/SnapshotRefType.java b/api/src/main/java/org/apache/iceberg/SnapshotRefType.java
index 18d5d1f2af..b878d9d0dd 100644
--- a/api/src/main/java/org/apache/iceberg/SnapshotRefType.java
+++ b/api/src/main/java/org/apache/iceberg/SnapshotRefType.java
@@ -16,10 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 enum SnapshotRefType {
-    BRANCH,
-    TAG
+  BRANCH,
+  TAG
 }
diff --git a/api/src/main/java/org/apache/iceberg/SnapshotUpdate.java b/api/src/main/java/org/apache/iceberg/SnapshotUpdate.java
index c1742f82ca..2c5ab79008 100644
--- a/api/src/main/java/org/apache/iceberg/SnapshotUpdate.java
+++ b/api/src/main/java/org/apache/iceberg/SnapshotUpdate.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.concurrent.ExecutorService;
diff --git a/api/src/main/java/org/apache/iceberg/SortDirection.java b/api/src/main/java/org/apache/iceberg/SortDirection.java
index 3be60b6520..5436f14c9c 100644
--- a/api/src/main/java/org/apache/iceberg/SortDirection.java
+++ b/api/src/main/java/org/apache/iceberg/SortDirection.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 public enum SortDirection {
-  ASC, DESC
+  ASC,
+  DESC
 }
diff --git a/api/src/main/java/org/apache/iceberg/SortField.java b/api/src/main/java/org/apache/iceberg/SortField.java
index f980d6f8e4..91a82b2bd6 100644
--- a/api/src/main/java/org/apache/iceberg/SortField.java
+++ b/api/src/main/java/org/apache/iceberg/SortField.java
@@ -16,16 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
 import java.util.Objects;
 import org.apache.iceberg.transforms.Transform;
 
-/**
- * A field in a {@link SortOrder}.
- */
+/** A field in a {@link SortOrder}. */
 public class SortField implements Serializable {
 
   private final Transform<?, ?> transform;
@@ -52,23 +49,17 @@ public class SortField implements Serializable {
     return (Transform<S, T>) transform;
   }
 
-  /**
-   * Returns the field id of the source field in the {@link SortOrder sort order's} table schema
-   */
+  /** Returns the field id of the source field in the {@link SortOrder sort order's} table schema */
   public int sourceId() {
     return sourceId;
   }
 
-  /**
-   * Returns the sort direction
-   */
+  /** Returns the sort direction */
   public SortDirection direction() {
     return direction;
   }
 
-  /**
-   * Returns the null order
-   */
+  /** Returns the null order */
   public NullOrder nullOrder() {
     return nullOrder;
   }
@@ -82,7 +73,9 @@ public class SortField implements Serializable {
   public boolean satisfies(SortField other) {
     if (Objects.equals(this, other)) {
       return true;
-    } else if (sourceId != other.sourceId || direction != other.direction || nullOrder != other.nullOrder) {
+    } else if (sourceId != other.sourceId
+        || direction != other.direction
+        || nullOrder != other.nullOrder) {
       return false;
     }
 
@@ -103,10 +96,10 @@ public class SortField implements Serializable {
     }
 
     SortField that = (SortField) other;
-    return transform.equals(that.transform) &&
-        sourceId == that.sourceId &&
-        direction == that.direction &&
-        nullOrder == that.nullOrder;
+    return transform.equals(that.transform)
+        && sourceId == that.sourceId
+        && direction == that.direction
+        && nullOrder == that.nullOrder;
   }
 
   @Override
diff --git a/api/src/main/java/org/apache/iceberg/SortOrder.java b/api/src/main/java/org/apache/iceberg/SortOrder.java
index 91b6177ece..ee02318de6 100644
--- a/api/src/main/java/org/apache/iceberg/SortOrder.java
+++ b/api/src/main/java/org/apache/iceberg/SortOrder.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.io.Serializable;
@@ -39,11 +38,10 @@ import org.apache.iceberg.transforms.Transforms;
 import org.apache.iceberg.types.Type;
 import org.apache.iceberg.types.Types;
 
-/**
- * A sort order that defines how data and delete files should be ordered in a table.
- */
+/** A sort order that defines how data and delete files should be ordered in a table. */
 public class SortOrder implements Serializable {
-  private static final SortOrder UNSORTED_ORDER = new SortOrder(new Schema(), 0, Collections.emptyList());
+  private static final SortOrder UNSORTED_ORDER =
+      new SortOrder(new Schema(), 0, Collections.emptyList());
 
   private final Schema schema;
   private final int orderId;
@@ -57,37 +55,27 @@ public class SortOrder implements Serializable {
     this.fields = fields.toArray(new SortField[0]);
   }
 
-  /**
-   * Returns the {@link Schema} for this sort order
-   */
+  /** Returns the {@link Schema} for this sort order */
   public Schema schema() {
     return schema;
   }
 
-  /**
-   * Returns the ID of this sort order
-   */
+  /** Returns the ID of this sort order */
   public int orderId() {
     return orderId;
   }
 
-  /**
-   * Returns the list of {@link SortField sort fields} for this sort order
-   */
+  /** Returns the list of {@link SortField sort fields} for this sort order */
   public List<SortField> fields() {
     return lazyFieldList();
   }
 
-  /**
-   * Returns true if the sort order is sorted
-   */
+  /** Returns true if the sort order is sorted */
   public boolean isSorted() {
     return fields.length >= 1;
   }
 
-  /**
-   * Returns true if the sort order is unsorted
-   */
+  /** Returns true if the sort order is unsorted */
   public boolean isUnsorted() {
     return fields.length < 1;
   }
@@ -139,7 +127,8 @@ public class SortOrder implements Serializable {
     UnboundSortOrder.Builder builder = UnboundSortOrder.builder().withOrderId(orderId);
 
     for (SortField field : fields) {
-      builder.addSortField(field.transform().toString(), field.sourceId(), field.direction(), field.nullOrder());
+      builder.addSortField(
+          field.transform().toString(), field.sourceId(), field.direction(), field.nullOrder());
     }
 
     return builder.build();
@@ -198,8 +187,8 @@ public class SortOrder implements Serializable {
 
   /**
    * A builder used to create valid {@link SortOrder sort orders}.
-   * <p>
-   * Call {@link #builderFor(Schema)} to create a new builder.
+   *
+   * <p>Call {@link #builderFor(Schema)} to create a new builder.
    */
   public static class Builder implements SortOrderBuilder<Builder> {
     private final Schema schema;
@@ -243,7 +232,6 @@ public class SortOrder implements Serializable {
       return addSortField(term, direction, nullOrder);
     }
 
-
     public Builder withOrderId(int newOrderId) {
       this.orderId = newOrderId;
       return this;
@@ -256,7 +244,8 @@ public class SortOrder implements Serializable {
 
     private Builder addSortField(Term term, SortDirection direction, NullOrder nullOrder) {
       Preconditions.checkArgument(term instanceof UnboundTerm, "Term must be unbound");
-      // ValidationException is thrown by bind if binding fails so we assume that boundTerm is correct
+      // ValidationException is thrown by bind if binding fails so we assume that boundTerm is
+      // correct
       BoundTerm<?> boundTerm = ((UnboundTerm<?>) term).bind(schema.asStruct(), caseSensitive);
       int sourceId = boundTerm.ref().fieldId();
       SortField sortField = new SortField(toTransform(boundTerm), sourceId, direction, nullOrder);
@@ -264,14 +253,16 @@ public class SortOrder implements Serializable {
       return this;
     }
 
-    Builder addSortField(String transformAsString, int sourceId, SortDirection direction, NullOrder nullOrder) {
+    Builder addSortField(
+        String transformAsString, int sourceId, SortDirection direction, NullOrder nullOrder) {
       Types.NestedField column = schema.findField(sourceId);
       ValidationException.check(column != null, "Cannot find source column: %s", sourceId);
       Transform<?, ?> transform = Transforms.fromString(column.type(), transformAsString);
       return addSortField(transform, sourceId, direction, nullOrder);
     }
 
-    Builder addSortField(Transform<?, ?> transform, int sourceId, SortDirection direction, NullOrder nullOrder) {
+    Builder addSortField(
+        Transform<?, ?> transform, int sourceId, SortDirection direction, NullOrder nullOrder) {
       SortField sortField = new SortField(transform, sourceId, direction, nullOrder);
       fields.add(sortField);
       return this;
@@ -306,7 +297,8 @@ public class SortOrder implements Serializable {
       } else if (term instanceof BoundTransform) {
         return ((BoundTransform<?, ?>) term).transform();
       } else {
-        throw new ValidationException("Invalid term: %s, expected either a bound reference or transform", term);
+        throw new ValidationException(
+            "Invalid term: %s, expected either a bound reference or transform", term);
       }
     }
   }
@@ -315,15 +307,16 @@ public class SortOrder implements Serializable {
     for (SortField field : sortOrder.fields) {
       Type sourceType = schema.findType(field.sourceId());
       ValidationException.check(
-          sourceType != null,
-          "Cannot find source column for sort field: %s", field);
+          sourceType != null, "Cannot find source column for sort field: %s", field);
       ValidationException.check(
           sourceType.isPrimitiveType(),
-          "Cannot sort by non-primitive source field: %s", sourceType);
+          "Cannot sort by non-primitive source field: %s",
+          sourceType);
       ValidationException.check(
           field.transform().canTransform(sourceType),
           "Invalid source type %s for transform: %s",
-          sourceType, field.transform());
+          sourceType,
+          field.transform());
     }
   }
 }
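
A brief sketch of the SortOrder builder reformatted above, using a hypothetical two-column schema:

    import org.apache.iceberg.NullOrder;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.SortOrder;
    import org.apache.iceberg.types.Types;

    public class SortOrderExample {
      public static void main(String[] args) {
        Schema schema =
            new Schema(
                Types.NestedField.required(1, "id", Types.LongType.get()),
                Types.NestedField.required(2, "ts", Types.TimestampType.withZone()));

        SortOrder order =
            SortOrder.builderFor(schema)
                .asc("id")
                .desc("ts", NullOrder.NULLS_LAST)
                .build();

        System.out.println(order.isSorted()); // true
      }
    }
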
diff --git a/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java b/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java
index ac622a2f05..1538586df1 100644
--- a/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java
+++ b/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java
@@ -16,15 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.expressions.Expressions;
 import org.apache.iceberg.expressions.Term;
 
-/**
- * Methods for building a sort order.
- */
+/** Methods for building a sort order. */
 public interface SortOrderBuilder<R> {
 
   /**
diff --git a/api/src/main/java/org/apache/iceberg/SplittableScanTask.java b/api/src/main/java/org/apache/iceberg/SplittableScanTask.java
index 0385e55972..4df61cb908 100644
--- a/api/src/main/java/org/apache/iceberg/SplittableScanTask.java
+++ b/api/src/main/java/org/apache/iceberg/SplittableScanTask.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 /**
@@ -26,10 +25,12 @@ package org.apache.iceberg;
  */
 public interface SplittableScanTask<ThisT> extends ScanTask {
   /**
-   * Attempts to split this scan task into several smaller scan tasks, each close to {@code splitSize} size.
-   * <p>
-   * Note the target split size is just guidance and the actual split size may be either smaller or larger.
-   * File formats like Parquet may leverage the row group offset information while splitting tasks.
+   * Attempts to split this scan task into several smaller scan tasks, each close to {@code
+   * targetSplitSize} in size.
+   *
+   * <p>Note the target split size is just guidance and the actual split size may be either smaller
+   * or larger. File formats like Parquet may leverage the row group offset information while
+   * splitting tasks.
    *
    * @param targetSplitSize the target size of each new scan task in bytes
    * @return an Iterable of smaller tasks
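
To illustrate the split contract described above, a small sketch assuming a FileScanTask (which extends SplittableScanTask) named task; the 128 MB target is arbitrary:

    import org.apache.iceberg.FileScanTask;

    public class SplitExample {
      static void printSplits(FileScanTask task) {
        // the target size is guidance only; actual splits may be smaller or larger
        for (FileScanTask split : task.split(128L * 1024 * 1024)) {
          System.out.println(split.start() + " + " + split.length());
        }
      }
    }
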
diff --git a/api/src/main/java/org/apache/iceberg/StructLike.java b/api/src/main/java/org/apache/iceberg/StructLike.java
index 0d3ddb40c7..9ff59aa9ac 100644
--- a/api/src/main/java/org/apache/iceberg/StructLike.java
+++ b/api/src/main/java/org/apache/iceberg/StructLike.java
@@ -16,13 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 /**
  * Interface for accessing data by position in a schema.
- * <p>
- * This interface supports accessing data in top-level fields, not in nested fields.
+ *
+ * <p>This interface supports accessing data in top-level fields, not in nested fields.
  */
 public interface StructLike {
   int size();
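
Since StructLike is positional, a minimal illustrative implementation backed by an Object array (ArrayStruct is a hypothetical name, not part of this patch):

    import org.apache.iceberg.StructLike;

    // a minimal positional struct backed by an Object array
    public class ArrayStruct implements StructLike {
      private final Object[] values;

      public ArrayStruct(Object... values) {
        this.values = values;
      }

      @Override
      public int size() {
        return values.length;
      }

      @Override
      public <T> T get(int pos, Class<T> javaClass) {
        return javaClass.cast(values[pos]);
      }

      @Override
      public <T> void set(int pos, T value) {
        values[pos] = value;
      }
    }
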
diff --git a/api/src/main/java/org/apache/iceberg/Table.java b/api/src/main/java/org/apache/iceberg/Table.java
index b83f07c6f8..8278c99bfc 100644
--- a/api/src/main/java/org/apache/iceberg/Table.java
+++ b/api/src/main/java/org/apache/iceberg/Table.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
@@ -25,9 +24,7 @@ import org.apache.iceberg.encryption.EncryptionManager;
 import org.apache.iceberg.io.FileIO;
 import org.apache.iceberg.io.LocationProvider;
 
-/**
- * Represents a table.
- */
+/** Represents a table. */
 public interface Table {
 
   /**
@@ -39,15 +36,13 @@ public interface Table {
     return toString();
   }
 
-  /**
-   * Refresh the current table metadata.
-   */
+  /** Refresh the current table metadata. */
   void refresh();
 
   /**
    * Create a new {@link TableScan scan} for this table.
-   * <p>
-   * Once a table scan is created, it can be refined to project columns and filter data.
+   *
+   * <p>Once a table scan is created, it can be refined to project columns and filter data.
    *
    * @return a table scan for this table
    */
@@ -55,8 +50,8 @@ public interface Table {
 
   /**
    * Create a new {@link IncrementalAppendScan scan} for this table.
-   * <p>
-   * Once a scan is created, it can be refined to project columns and filter data.
+   *
+   * <p>Once a scan is created, it can be refined to project columns and filter data.
    *
    * @return an incremental scan for appends only snapshots
    */
@@ -66,8 +61,8 @@ public interface Table {
 
   /**
    * Create a new {@link IncrementalChangelogScan} for this table.
-   * <p>
-   * Once a scan is created, it can be refined to project columns and filter data.
+   *
+   * <p>Once a scan is created, it can be refined to project columns and filter data.
    *
    * @return an incremental changelog scan
    */
@@ -168,7 +163,8 @@ public interface Table {
   UpdateSchema updateSchema();
 
   /**
-   * Create a new {@link UpdatePartitionSpec} to alter the partition spec of this table and commit the change.
+   * Create a new {@link UpdatePartitionSpec} to alter the partition spec of this table and commit
+   * the change.
    *
    * @return a new {@link UpdatePartitionSpec}
    */
@@ -204,13 +200,13 @@ public interface Table {
 
   /**
    * Create a new {@link AppendFiles append API} to add files to this table and commit.
-   * <p>
-   * Using this method signals to the underlying implementation that the append should not perform
-   * extra work in order to commit quickly. Fast appends are not recommended for normal writes
-   * because the fast commit may cause split planning to slow down over time.
-   * <p>
-   * Implementations may not support fast appends, in which case this will return the same appender
-   * as {@link #newAppend()}.
+   *
+   * <p>Using this method signals to the underlying implementation that the append should not
+   * perform extra work in order to commit quickly. Fast appends are not recommended for normal
+   * writes because the fast commit may cause split planning to slow down over time.
+   *
+   * <p>Implementations may not support fast appends, in which case this will return the same
+   * appender as {@link #newAppend()}.
    *
    * @return a new {@link AppendFiles}
    */
@@ -226,8 +222,8 @@ public interface Table {
   RewriteFiles newRewrite();
 
   /**
-   * Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
-   * table and commit.
+   * Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this table
+   * and commit.
    *
    * @return a new {@link RewriteManifests}
    */
@@ -241,7 +237,8 @@ public interface Table {
   OverwriteFiles newOverwrite();
 
   /**
-   * Create a new {@link RowDelta row-level delta API} to remove or replace rows in existing data files.
+   * Create a new {@link RowDelta row-level delta API} to remove or replace rows in existing data
+   * files.
    *
    * @return a new {@link RowDelta}
    */
@@ -250,9 +247,10 @@ public interface Table {
   /**
    * Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
    * overwrite partitions in the table with new data.
-   * <p>
-   * This is provided to implement SQL compatible with Hive table operations but is not recommended.
-   * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
+   *
+   * <p>This is provided to implement SQL compatible with Hive table operations but is not
+   * recommended. Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite
+   * data.
    *
    * @return a new {@link ReplacePartitions}
    */
@@ -282,7 +280,9 @@ public interface Table {
   Rollback rollback();
 
   /**
-   * Create a new {@link ManageSnapshots manage snapshots API} to manage snapshots in this table and commit.
+   * Create a new {@link ManageSnapshots manage snapshots API} to manage snapshots in this table and
+   * commit.
+   *
    * @return a new {@link ManageSnapshots}
    */
   ManageSnapshots manageSnapshots();
@@ -294,18 +294,15 @@ public interface Table {
    */
   Transaction newTransaction();
 
-  /**
-   * Returns a {@link FileIO} to read and write table data and metadata files.
-   */
+  /** Returns a {@link FileIO} to read and write table data and metadata files. */
   FileIO io();
 
   /**
-   * Returns an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt data files.
+   * Returns an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt data
+   * files.
    */
   EncryptionManager encryption();
 
-  /**
-   * Returns a {@link LocationProvider} to provide locations for new data files.
-   */
+  /** Returns a {@link LocationProvider} to provide locations for new data files. */
   LocationProvider locationProvider();
 }
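
A sketch of the fast-append path described above; it assumes a Table and a prepared DataFile are already available:

    import org.apache.iceberg.AppendFiles;
    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Table;

    public class FastAppendExample {
      static void appendQuickly(Table table, DataFile file) {
        // skips manifest merging for a faster commit; implementations may
        // fall back to the behavior of newAppend()
        AppendFiles append = table.newFastAppend();
        append.appendFile(file);
        append.commit();
      }
    }
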
diff --git a/api/src/main/java/org/apache/iceberg/TableScan.java b/api/src/main/java/org/apache/iceberg/TableScan.java
index fedeb05203..de0e76b8b1 100644
--- a/api/src/main/java/org/apache/iceberg/TableScan.java
+++ b/api/src/main/java/org/apache/iceberg/TableScan.java
@@ -16,15 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.expressions.Expression;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 
-/**
- * API for configuring a table scan.
- */
+/** API for configuring a table scan. */
 public interface TableScan extends Scan<TableScan, FileScanTask, CombinedScanTask> {
   /**
    * Returns the {@link Table} from which this scan loads data.
@@ -73,25 +70,25 @@ public interface TableScan extends Scan<TableScan, FileScanTask, CombinedScanTas
   Expression filter();
 
   /**
-   * Create a new {@link TableScan} to read appended data from {@code fromSnapshotId} exclusive to {@code toSnapshotId}
-   * inclusive.
+   * Create a new {@link TableScan} to read appended data from {@code fromSnapshotId} exclusive to
+   * {@code toSnapshotId} inclusive.
    *
    * @param fromSnapshotId the last snapshot id read by the user, exclusive
    * @param toSnapshotId read append data up to this snapshot id
-   * @return a table scan which can read append data from {@code fromSnapshotId}
-   * exclusive and up to {@code toSnapshotId} inclusive
+   * @return a table scan which can read append data from {@code fromSnapshotId} exclusive and up to
+   *     {@code toSnapshotId} inclusive
    */
   default TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) {
     throw new UnsupportedOperationException("Incremental scan is not supported");
   }
 
   /**
-   * Create a new {@link TableScan} to read appended data from {@code fromSnapshotId} exclusive to the current snapshot
-   * inclusive.
+   * Create a new {@link TableScan} to read appended data from {@code fromSnapshotId} exclusive to
+   * the current snapshot inclusive.
    *
   * @param fromSnapshotId the last snapshot id read by the user, exclusive
-   * @return a table scan which can read append data from {@code fromSnapshotId}
-   * exclusive and up to current snapshot inclusive
+   * @return a table scan which can read append data from {@code fromSnapshotId} exclusive and up
+   *     to the current snapshot inclusive
    */
   default TableScan appendsAfter(long fromSnapshotId) {
     throw new UnsupportedOperationException("Incremental scan is not supported");
@@ -99,16 +96,18 @@ public interface TableScan extends Scan<TableScan, FileScanTask, CombinedScanTas
 
   /**
    * Returns the {@link Snapshot} that will be used by this scan.
-   * <p>
-   * If the snapshot was not configured using {@link #asOfTime(long)} or {@link #useSnapshot(long)}, the current table
-   * snapshot will be used.
+   *
+   * <p>If the snapshot was not configured using {@link #asOfTime(long)} or {@link
+   * #useSnapshot(long)}, the current table snapshot will be used.
    *
    * @return the Snapshot this scan will use
    */
   Snapshot snapshot();
 
   /**
-   * Returns whether this scan should apply column name case sensitiveness as per {@link Scan#caseSensitive(boolean)}.
+   * Returns whether this scan should apply column name case sensitivity as per {@link
+   * Scan#caseSensitive(boolean)}.
+   *
    * @return true if case sensitive, false otherwise.
    */
   boolean isCaseSensitive();
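
A sketch of the incremental append scan described above; snapshot ids are caller-supplied, and implementations that do not support incremental scans will throw UnsupportedOperationException:

    import java.io.IOException;
    import org.apache.iceberg.CombinedScanTask;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.TableScan;
    import org.apache.iceberg.io.CloseableIterable;

    public class IncrementalScanExample {
      static void planAppends(Table table, long fromSnapshotId, long toSnapshotId)
          throws IOException {
        // reads data appended after fromSnapshotId, up to and including toSnapshotId
        TableScan scan = table.newScan().appendsBetween(fromSnapshotId, toSnapshotId);
        try (CloseableIterable<CombinedScanTask> tasks = scan.planTasks()) {
          for (CombinedScanTask task : tasks) {
            System.out.println(task.files().size() + " files in task");
          }
        }
      }
    }
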
diff --git a/api/src/main/java/org/apache/iceberg/Tables.java b/api/src/main/java/org/apache/iceberg/Tables.java
index 1c1daafed5..eae6146a76 100644
--- a/api/src/main/java/org/apache/iceberg/Tables.java
+++ b/api/src/main/java/org/apache/iceberg/Tables.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Map;
@@ -25,8 +24,8 @@ import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 /**
  * Generic interface for creating and loading a table implementation.
  *
- * The 'tableIdentifier' field should be interpreted by the underlying
- * implementation (e.g. database.table_name)
+ * <p>The 'tableIdentifier' field should be interpreted by the underlying implementation (e.g.
+ * database.table_name).
  */
 public interface Tables {
   default Table create(Schema schema, String tableIdentifier) {
@@ -37,16 +36,19 @@ public interface Tables {
     return create(schema, spec, ImmutableMap.of(), tableIdentifier);
   }
 
-  default Table create(Schema schema, PartitionSpec spec, Map<String, String> properties, String tableIdentifier) {
+  default Table create(
+      Schema schema, PartitionSpec spec, Map<String, String> properties, String tableIdentifier) {
     return create(schema, spec, SortOrder.unsorted(), properties, tableIdentifier);
   }
 
-  default Table create(Schema schema,
-                       PartitionSpec spec,
-                       SortOrder order,
-                       Map<String, String> properties,
-                       String tableIdentifier) {
-    throw new UnsupportedOperationException(this.getClass().getName() + " does not implement create with a sort order");
+  default Table create(
+      Schema schema,
+      PartitionSpec spec,
+      SortOrder order,
+      Map<String, String> properties,
+      String tableIdentifier) {
+    throw new UnsupportedOperationException(
+        this.getClass().getName() + " does not implement create with a sort order");
   }
 
   Table load(String tableIdentifier);
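
As an illustration of the Tables contract above, a sketch using HadoopTables (an implementation from iceberg-core, not this api module), where the tableIdentifier is interpreted as a filesystem path; the path is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.iceberg.PartitionSpec;
    import org.apache.iceberg.Schema;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.hadoop.HadoopTables;
    import org.apache.iceberg.types.Types;

    public class TablesExample {
      public static void main(String[] args) {
        Schema schema =
            new Schema(Types.NestedField.required(1, "id", Types.LongType.get()));

        // HadoopTables interprets the identifier as a filesystem path
        HadoopTables tables = new HadoopTables(new Configuration());
        Table table = tables.create(schema, PartitionSpec.unpartitioned(), "/tmp/warehouse/demo");
        System.out.println(table.location());
      }
    }
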
diff --git a/api/src/main/java/org/apache/iceberg/Transaction.java b/api/src/main/java/org/apache/iceberg/Transaction.java
index 609f86d1a0..9368150e69 100644
--- a/api/src/main/java/org/apache/iceberg/Transaction.java
+++ b/api/src/main/java/org/apache/iceberg/Transaction.java
@@ -16,15 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.exceptions.CommitFailedException;
 import org.apache.iceberg.exceptions.ValidationException;
 
-/**
- * A transaction for performing multiple updates to a table.
- */
+/** A transaction for performing multiple updates to a table. */
 public interface Transaction {
   /**
    * Return the {@link Table} that this transaction will update.
@@ -77,13 +74,13 @@ public interface Transaction {
 
   /**
    * Create a new {@link AppendFiles append API} to add files to this table.
-   * <p>
-   * Using this method signals to the underlying implementation that the append should not perform
-   * extra work in order to commit quickly. Fast appends are not recommended for normal writes
-   * because the fast commit may cause split planning to slow down over time.
-   * <p>
-   * Implementations may not support fast appends, in which case this will return the same appender
-   * as {@link #newAppend()}.
+   *
+   * <p>Using this method signals to the underlying implementation that the append should not
+   * perform extra work in order to commit quickly. Fast appends are not recommended for normal
+   * writes because the fast commit may cause split planning to slow down over time.
+   *
+   * <p>Implementations may not support fast appends, in which case this will return the same
+   * appender as {@link #newAppend()}.
    *
    * @return a new {@link AppendFiles}
    */
@@ -99,7 +96,8 @@ public interface Transaction {
   RewriteFiles newRewrite();
 
   /**
-   * Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this table.
+   * Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
+   * table.
    *
    * @return a new {@link RewriteManifests}
    */
@@ -113,7 +111,8 @@ public interface Transaction {
   OverwriteFiles newOverwrite();
 
   /**
-   * Create a new {@link RowDelta row-level delta API} to remove or replace rows in existing data files.
+   * Create a new {@link RowDelta row-level delta API} to remove or replace rows in existing data
+   * files.
    *
    * @return a new {@link RowDelta}
    */
@@ -122,9 +121,10 @@ public interface Transaction {
   /**
    * Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
    * overwrite partitions in the table with new data.
-   * <p>
-   * This is provided to implement SQL compatible with Hive table operations but is not recommended.
-   * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
+   *
+   * <p>This is provided to implement SQL compatible with Hive table operations but is not
+   * recommended. Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite
+   * data.
    *
    * @return a new {@link ReplacePartitions}
    */
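
A sketch of staging several updates in one Transaction, as the interface above describes; the property key is a made-up example:

    import org.apache.iceberg.DataFile;
    import org.apache.iceberg.Table;
    import org.apache.iceberg.Transaction;

    public class TransactionExample {
      static void appendAndTag(Table table, DataFile file) {
        Transaction txn = table.newTransaction();
        txn.newFastAppend().appendFile(file).commit();              // staged, not yet visible
        txn.updateProperties().set("custom.note", "demo").commit(); // staged
        txn.commitTransaction();                                    // applies both updates atomically
      }
    }
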
diff --git a/api/src/main/java/org/apache/iceberg/UnboundPartitionSpec.java b/api/src/main/java/org/apache/iceberg/UnboundPartitionSpec.java
index 5cde0f6324..530d3d442c 100644
--- a/api/src/main/java/org/apache/iceberg/UnboundPartitionSpec.java
+++ b/api/src/main/java/org/apache/iceberg/UnboundPartitionSpec.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.List;
@@ -116,7 +115,8 @@ public class UnboundPartitionSpec {
       return name;
     }
 
-    private UnboundPartitionField(String transformAsString, int sourceId, Integer partitionId, String name) {
+    private UnboundPartitionField(
+        String transformAsString, int sourceId, Integer partitionId, String name) {
       this.transformAsString = transformAsString;
       this.sourceId = sourceId;
       this.partitionId = partitionId;
diff --git a/api/src/main/java/org/apache/iceberg/UnboundSortOrder.java b/api/src/main/java/org/apache/iceberg/UnboundSortOrder.java
index 32ef18f88f..1181b665f8 100644
--- a/api/src/main/java/org/apache/iceberg/UnboundSortOrder.java
+++ b/api/src/main/java/org/apache/iceberg/UnboundSortOrder.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import java.util.Collections;
@@ -24,7 +23,8 @@ import java.util.List;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 
 public class UnboundSortOrder {
-  private static final UnboundSortOrder UNSORTED_ORDER = new UnboundSortOrder(0, Collections.emptyList());
+  private static final UnboundSortOrder UNSORTED_ORDER =
+      new UnboundSortOrder(0, Collections.emptyList());
 
   private final int orderId;
   private final List<UnboundSortField> fields;
@@ -38,7 +38,8 @@ public class UnboundSortOrder {
     SortOrder.Builder builder = SortOrder.builderFor(schema).withOrderId(orderId);
 
     for (UnboundSortField field : fields) {
-      builder.addSortField(field.transformAsString, field.sourceId, field.direction, field.nullOrder);
+      builder.addSortField(
+          field.transformAsString, field.sourceId, field.direction, field.nullOrder);
     }
 
     return builder.build();
@@ -48,7 +49,8 @@ public class UnboundSortOrder {
     SortOrder.Builder builder = SortOrder.builderFor(schema).withOrderId(orderId);
 
     for (UnboundSortField field : fields) {
-      builder.addSortField(field.transformAsString, field.sourceId, field.direction, field.nullOrder);
+      builder.addSortField(
+          field.transformAsString, field.sourceId, field.direction, field.nullOrder);
     }
 
     return builder.buildUnchecked();
@@ -73,22 +75,22 @@ public class UnboundSortOrder {
 
   /**
    * A builder used to create {@link UnboundSortOrder unbound sort orders}.
-   * <p>
-   * Call {@link #builder()} to create a new builder.
+   *
+   * <p>Call {@link #builder()} to create a new builder.
    */
   static class Builder {
     private final List<UnboundSortField> fields = Lists.newArrayList();
     private Integer orderId = null;
 
-    private Builder() {
-    }
+    private Builder() {}
 
     Builder withOrderId(int newOrderId) {
       this.orderId = newOrderId;
       return this;
     }
 
-    Builder addSortField(String transformAsString, int sourceId, SortDirection direction, NullOrder nullOrder) {
+    Builder addSortField(
+        String transformAsString, int sourceId, SortDirection direction, NullOrder nullOrder) {
       fields.add(new UnboundSortField(transformAsString, sourceId, direction, nullOrder));
       return this;
     }
@@ -117,7 +119,8 @@ public class UnboundSortOrder {
     private final SortDirection direction;
     private final NullOrder nullOrder;
 
-    private UnboundSortField(String transformAsString, int sourceId, SortDirection direction, NullOrder nullOrder) {
+    private UnboundSortField(
+        String transformAsString, int sourceId, SortDirection direction, NullOrder nullOrder) {
       this.transformAsString = transformAsString;
       this.sourceId = sourceId;
       this.direction = direction;
diff --git a/api/src/main/java/org/apache/iceberg/UpdateLocation.java b/api/src/main/java/org/apache/iceberg/UpdateLocation.java
index 4513749b2c..646fbb1229 100644
--- a/api/src/main/java/org/apache/iceberg/UpdateLocation.java
+++ b/api/src/main/java/org/apache/iceberg/UpdateLocation.java
@@ -16,12 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
-/**
- * API for setting a table's base location.
- */
+/** API for setting a table's base location. */
 public interface UpdateLocation extends PendingUpdate<String> {
   /**
    * Set the table's location.
diff --git a/api/src/main/java/org/apache/iceberg/UpdatePartitionSpec.java b/api/src/main/java/org/apache/iceberg/UpdatePartitionSpec.java
index 5e85dd9919..f48d590af1 100644
--- a/api/src/main/java/org/apache/iceberg/UpdatePartitionSpec.java
+++ b/api/src/main/java/org/apache/iceberg/UpdatePartitionSpec.java
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.iceberg;
 
 import org.apache.iceberg.exceptions.CommitFailedException;
@@ -25,8 +24,8 @@ import org.apache.iceberg.expressions.Term;
 
 /**
  * API for partition spec evolution.
- * <p>
- * When committing, these changes will be applied to the current table metadata. Commit conflicts
+ *
+ * <p>When committing, these changes will be applied to the current table metadata. Commit conflicts
  * will not be resolved and will result in a {@link CommitFailedException}.
  */
 public interface UpdatePartitionSpec extends PendingUpdate<PartitionSpec> {
@@ -40,48 +39,53 @@ public interface UpdatePartitionSpec extends PendingUpdate<PartitionSpec> {
 
... 487141 lines suppressed ...