Posted to commits@iceberg.apache.org by bl...@apache.org on 2019/12/13 00:59:47 UTC

[incubator-iceberg] branch spark-3 updated: Fix style checks and a test failure (#696)

This is an automated email from the ASF dual-hosted git repository.

blue pushed a commit to branch spark-3
in repository https://gitbox.apache.org/repos/asf/incubator-iceberg.git


The following commit(s) were added to refs/heads/spark-3 by this push:
     new c47059c  Fix style checks and a test failure (#696)
c47059c is described below

commit c47059c6c00e71537a84f36189edaba8565bb738
Author: Chen, Junjie <ch...@gmail.com>
AuthorDate: Fri Dec 13 08:59:37 2019 +0800

    Fix style checks and a test failure (#696)
---
 spark/src/main/java/org/apache/iceberg/spark/SparkCatalog.java    | 2 +-
 spark/src/main/java/org/apache/iceberg/spark/SparkFilters.java    | 4 ++++
 .../java/org/apache/iceberg/spark/source/SparkStreamingWrite.java | 8 --------
 .../java/org/apache/iceberg/spark/source/SparkWriteBuilder.java   | 4 ++--
 .../java/org/apache/iceberg/spark/source/TestFilteredScan.java    | 1 -
 5 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/spark/src/main/java/org/apache/iceberg/spark/SparkCatalog.java b/spark/src/main/java/org/apache/iceberg/spark/SparkCatalog.java
index cd81e8b..1278d37 100644
--- a/spark/src/main/java/org/apache/iceberg/spark/SparkCatalog.java
+++ b/spark/src/main/java/org/apache/iceberg/spark/SparkCatalog.java
@@ -237,7 +237,7 @@ public class SparkCatalog implements StagingTableCatalog {
   }
 
   @Override
-  final public void initialize(String name, CaseInsensitiveStringMap options) {
+  public final void initialize(String name, CaseInsensitiveStringMap options) {
     boolean cacheEnabled = Boolean.parseBoolean(options.getOrDefault("cache-enabled", "true"));
     Catalog catalog = buildIcebergCatalog(name, options);
 
diff --git a/spark/src/main/java/org/apache/iceberg/spark/SparkFilters.java b/spark/src/main/java/org/apache/iceberg/spark/SparkFilters.java
index 6c43b87..ae1b88f 100644
--- a/spark/src/main/java/org/apache/iceberg/spark/SparkFilters.java
+++ b/spark/src/main/java/org/apache/iceberg/spark/SparkFilters.java
@@ -27,7 +27,9 @@ import org.apache.iceberg.expressions.Expression;
 import org.apache.iceberg.expressions.Expression.Operation;
 import org.apache.iceberg.expressions.Expressions;
 import org.apache.spark.sql.catalyst.util.DateTimeUtils;
+import org.apache.spark.sql.sources.AlwaysFalse;
 import org.apache.spark.sql.sources.AlwaysFalse$;
+import org.apache.spark.sql.sources.AlwaysTrue;
 import org.apache.spark.sql.sources.AlwaysTrue$;
 import org.apache.spark.sql.sources.And;
 import org.apache.spark.sql.sources.EqualNullSafe;
@@ -63,7 +65,9 @@ public class SparkFilters {
 
   private static final ImmutableMap<Class<? extends Filter>, Operation> FILTERS = ImmutableMap
       .<Class<? extends Filter>, Operation>builder()
+      .put(AlwaysTrue.class, Operation.TRUE)
       .put(AlwaysTrue$.class, Operation.TRUE)
+      .put(AlwaysFalse.class, Operation.FALSE)
       .put(AlwaysFalse$.class, Operation.FALSE)
       .put(EqualTo.class, Operation.EQ)
       .put(EqualNullSafe.class, Operation.EQ)
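The FILTERS map is keyed by the runtime Class of a Spark Filter, so both entries are needed for each constant filter: in Spark 3, AlwaysTrue and AlwaysFalse are Scala case classes with companion objects that extend them, and a Filter instance can report either the case class (AlwaysTrue) or the companion's singleton class (AlwaysTrue$) depending on how it was created. A sketch of the distinction, assuming Spark 3's definitions (the wrapper class name below is illustrative):

    import org.apache.spark.sql.sources.AlwaysTrue;
    import org.apache.spark.sql.sources.AlwaysTrue$;
    import org.apache.spark.sql.sources.Filter;

    public class FilterClassExample {
      public static void main(String[] args) {
        // Constructed as a case class: runtime class is AlwaysTrue
        Filter fromConstructor = new AlwaysTrue();
        // Scala companion-object singleton: runtime class is AlwaysTrue$
        Filter fromCompanion = AlwaysTrue$.MODULE$;

        System.out.println(fromConstructor.getClass().getSimpleName()); // AlwaysTrue
        System.out.println(fromCompanion.getClass().getSimpleName());   // AlwaysTrue$
        // A Class-keyed lookup therefore needs both entries, as the
        // FILTERS builder above now provides.
      }
    }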
diff --git a/spark/src/main/java/org/apache/iceberg/spark/source/SparkStreamingWrite.java b/spark/src/main/java/org/apache/iceberg/spark/source/SparkStreamingWrite.java
index 5781534..c07c202 100644
--- a/spark/src/main/java/org/apache/iceberg/spark/source/SparkStreamingWrite.java
+++ b/spark/src/main/java/org/apache/iceberg/spark/source/SparkStreamingWrite.java
@@ -22,20 +22,12 @@ package org.apache.iceberg.spark.source;
 import java.util.Map;
 import org.apache.iceberg.AppendFiles;
 import org.apache.iceberg.DataFile;
-import org.apache.iceberg.FileFormat;
 import org.apache.iceberg.OverwriteFiles;
-import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.Snapshot;
 import org.apache.iceberg.SnapshotUpdate;
 import org.apache.iceberg.Table;
-import org.apache.iceberg.encryption.EncryptionManager;
 import org.apache.iceberg.expressions.Expressions;
-import org.apache.iceberg.io.FileIO;
-import org.apache.iceberg.io.LocationProvider;
-import org.apache.iceberg.util.PropertyUtil;
-import org.apache.spark.sql.catalyst.InternalRow;
-import org.apache.spark.sql.connector.write.DataWriter;
 import org.apache.spark.sql.connector.write.WriterCommitMessage;
 import org.apache.spark.sql.connector.write.streaming.StreamingDataWriterFactory;
 import org.apache.spark.sql.connector.write.streaming.StreamingWrite;
diff --git a/spark/src/main/java/org/apache/iceberg/spark/source/SparkWriteBuilder.java b/spark/src/main/java/org/apache/iceberg/spark/source/SparkWriteBuilder.java
index 3f1b48b..2b63be0 100644
--- a/spark/src/main/java/org/apache/iceberg/spark/source/SparkWriteBuilder.java
+++ b/spark/src/main/java/org/apache/iceberg/spark/source/SparkWriteBuilder.java
@@ -164,10 +164,10 @@ class SparkWriteBuilder implements WriteBuilder, SupportsDynamicOverwrite, Suppo
     }
   }
 
-  private boolean checkNullability(CaseInsensitiveStringMap options) {
+  private boolean checkNullability(CaseInsensitiveStringMap opts) {
     boolean sparkCheckNullability = Boolean.parseBoolean(spark.conf()
         .get("spark.sql.iceberg.check-nullability", "true"));
-    boolean dataFrameCheckNullability = options.getBoolean("check-nullability", true);
+    boolean dataFrameCheckNullability = opts.getBoolean("check-nullability", true);
     return sparkCheckNullability && dataFrameCheckNullability;
   }
 }
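The parameter rename in checkNullability is also a style fix: SparkWriteBuilder presumably keeps the write options in a field named "options" (the field itself is outside this hunk), so a same-named parameter shadows it, which checks such as checkstyle's HiddenField flag. A minimal sketch of the pattern, with hypothetical names:

    public class HiddenFieldExample {
      private String options;  // field a same-named parameter would shadow

      // "void check(String options)" would hide the field and trip a
      // hidden-field style check; a distinct parameter name avoids that.
      void check(String opts) {
        this.options = opts;
      }
    }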
diff --git a/spark/src/test/java/org/apache/iceberg/spark/source/TestFilteredScan.java b/spark/src/test/java/org/apache/iceberg/spark/source/TestFilteredScan.java
index ead9a27..1bf37fa 100644
--- a/spark/src/test/java/org/apache/iceberg/spark/source/TestFilteredScan.java
+++ b/spark/src/test/java/org/apache/iceberg/spark/source/TestFilteredScan.java
@@ -53,7 +53,6 @@ import org.apache.spark.sql.api.java.UDF1;
 import org.apache.spark.sql.catalyst.expressions.UnsafeRow;
 import org.apache.spark.sql.connector.read.Batch;
 import org.apache.spark.sql.connector.read.InputPartition;
-import org.apache.spark.sql.connector.read.Scan;
 import org.apache.spark.sql.connector.read.ScanBuilder;
 import org.apache.spark.sql.connector.read.SupportsPushDownFilters;
 import org.apache.spark.sql.sources.And;