Posted to reviews@spark.apache.org by GitBox <gi...@apache.org> on 2019/07/05 20:00:41 UTC

[GitHub] [spark] rdblue commented on a change in pull request #24832: [SPARK-27845][SQL] DataSourceV2: InsertTable

rdblue commented on a change in pull request #24832: [SPARK-27845][SQL] DataSourceV2: InsertTable
URL: https://github.com/apache/spark/pull/24832#discussion_r300785760
 
 

 ##########
 File path: sql/core/src/test/scala/org/apache/spark/sql/sources/v2/TestInMemoryTableCatalog.scala
 ##########
 @@ -149,49 +161,89 @@ private class InMemoryTable(
   }
 
   override def newWriteBuilder(options: CaseInsensitiveStringMap): WriteBuilder = {
-    new WriteBuilder with SupportsTruncate {
-      private var shouldTruncate: Boolean = false
+    new WriteBuilder with SupportsTruncate with SupportsOverwrite with SupportsDynamicOverwrite {
+      private var writer: BatchWrite = Append
 
       override def truncate(): WriteBuilder = {
-        shouldTruncate = true
+        assert(writer == Append)
+        writer = TruncateAndAppend
+        this
+      }
+
+      override def overwrite(filters: Array[Filter]): WriteBuilder = {
+        assert(writer == Append)
+        writer = new Overwrite(filters)
         this
       }
 
-      override def buildForBatch(): BatchWrite = {
-        if (shouldTruncate) TruncateAndAppend else Append
+      override def overwriteDynamicPartitions(): WriteBuilder = {
+        assert(writer == Append)
+        writer = DynamicOverwrite
+        this
       }
+
+      override def buildForBatch(): BatchWrite = writer
     }
   }
 
-  private object TruncateAndAppend extends BatchWrite {
+  private abstract class TestBatchWrite extends BatchWrite {
     override def createBatchWriterFactory(): DataWriterFactory = {
       BufferedRowsWriterFactory
     }
 
-    override def commit(messages: Array[WriterCommitMessage]): Unit = {
-      replaceData(messages.map(_.asInstanceOf[BufferedRows]))
+    override def abort(messages: Array[WriterCommitMessage]): Unit = {
     }
+  }
 
-    override def abort(messages: Array[WriterCommitMessage]): Unit = {
+  private object Append extends TestBatchWrite {
+    override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
+      withData(messages.map(_.asInstanceOf[BufferedRows]))
     }
   }
 
-  private object Append extends BatchWrite {
-    override def createBatchWriterFactory(): DataWriterFactory = {
-      BufferedRowsWriterFactory
+  private object DynamicOverwrite extends TestBatchWrite {
+    override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
+      val newData = messages.map(_.asInstanceOf[BufferedRows])
+      dataMap --= newData.flatMap(_.rows.map(getKey))
+      withData(newData)
     }
+  }
 
-    override def commit(messages: Array[WriterCommitMessage]): Unit = {
-      replaceData(data ++ messages.map(_.asInstanceOf[BufferedRows]))
+  private class Overwrite(filters: Array[Filter]) extends TestBatchWrite {
+    override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
+      val deleteKeys = dataMap.keys.filter { partValues =>
+        filters.exists {
+          case EqualTo(attr, value) =>
+            partFieldNames.zipWithIndex.find(_._1 == attr) match {
+              case Some((_, partIndex)) =>
+                value == partValues(partIndex)
+              case _ =>
+                throw new IllegalArgumentException(s"Unknown filter attribute: $attr")
+            }
+          case f @ _ =>
+            throw new IllegalArgumentException(s"Unsupported filter type: $f")
+        }
+      }
+      dataMap --= deleteKeys
+      withData(messages.map(_.asInstanceOf[BufferedRows]))
     }
+  }
 
-    override def abort(messages: Array[WriterCommitMessage]): Unit = {
+  private object TruncateAndAppend extends TestBatchWrite {
+    override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
+      dataMap = mutable.Map.empty
 
 Review comment:
   This should use `dataMap.clear()` instead of reassigning the field, because the block is synchronized on the original `dataMap` instance. After reassignment, another thread can enter a `synchronized` block on the new instance while this commit is still running, defeating the mutual exclusion.
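
   For illustration (a minimal sketch, not part of this PR; `Store` and its members are hypothetical stand-ins for the test table's state), the reason reassignment breaks the locking:

   ```scala
   import scala.collection.mutable

   class Store {
     // The same var is used both as the data store and as the lock object,
     // mirroring how `dataMap` is used in the test table.
     private var dataMap: mutable.Map[Int, String] = mutable.Map.empty

     // Broken: locks the *current* dataMap instance, then replaces it.
     // Another thread can then synchronize on the new instance and proceed
     // while this thread is still inside the block.
     def truncateByReassign(): Unit = dataMap.synchronized {
       dataMap = mutable.Map.empty
     }

     // Suggested fix: clear in place so the lock object never changes.
     def truncateByClear(): Unit = dataMap.synchronized {
       dataMap.clear()
     }
   }
   ```

   Applied here, the commit would presumably keep `dataMap.clear()` and the following `withData(...)` call inside the same `synchronized` block.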

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org