You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@iceberg.apache.org by GitBox <gi...@apache.org> on 2022/12/09 22:34:26 UTC

[GitHub] [iceberg] szehon-ho commented on a diff in pull request #6344: Spark 3.3: Introduce the changelog iterator

szehon-ho commented on code in PR #6344:
URL: https://github.com/apache/iceberg/pull/6344#discussion_r1044799711


##########
api/src/main/java/org/apache/iceberg/ChangelogOperation.java:
##########
@@ -21,5 +21,7 @@
 /** An enum representing possible operations in a changelog. */
 public enum ChangelogOperation {
   INSERT,
-  DELETE
+  DELETE,
+  UPDATE_BEFORE,

Review Comment:
  Just curious — are we still going with "PRE_UPDATE" / "POST_UPDATE" as in the design doc? At least I think having the adjective before "Update" is better. Not sure if I missed some other discussion.



##########
spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/TestChangelogIterator.java:
##########
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import static org.apache.iceberg.ChangelogOperation.DELETE;
+import static org.apache.iceberg.ChangelogOperation.INSERT;
+import static org.apache.iceberg.ChangelogOperation.UPDATE_AFTER;
+import static org.apache.iceberg.ChangelogOperation.UPDATE_BEFORE;
+
+import java.util.List;
+import java.util.Objects;
+import org.apache.iceberg.relocated.com.google.common.collect.Iterators;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;
+import org.junit.Test;
+
+public class TestChangelogIterator {
+  private final List<Row> rows =
+      Lists.newArrayList(
+          new GenericRowWithSchema(new Object[] {1, "a", "data", "DELETE"}, null),
+          new GenericRowWithSchema(new Object[] {1, "a", "new_data", "INSERT"}, null),
+          // next two rows belong to different partitions
+          new GenericRowWithSchema(new Object[] {2, "b", "data", "DELETE"}, null),
+          new GenericRowWithSchema(new Object[] {3, "c", "data", "INSERT"}, null),
+          new GenericRowWithSchema(new Object[] {4, "d", "data", "DELETE"}, null),
+          new GenericRowWithSchema(new Object[] {4, "d", "data", "INSERT"}, null));
+
+  private final int changeTypeIndex = 3;
+  private final List<Integer> partitionIdx = Lists.newArrayList(0, 1);
+
+  @Test
+  public void testUpdatedRows() {
+    ChangelogIterator iterator =
+        new ChangelogIterator(rows.iterator(), changeTypeIndex, partitionIdx, true);
+
+    List<Row> result = Lists.newArrayList(Iterators.filter(iterator, Objects::nonNull));
+    SparkTestBase.assertEquals(
+        "Rows should match",
+        Lists.newArrayList(
+            new Object[] {1, "a", "data", UPDATE_BEFORE.name()},
+            new Object[] {1, "a", "new_data", UPDATE_AFTER.name()},
+            new Object[] {2, "b", "data", "DELETE"},
+            new Object[] {3, "c", "data", "INSERT"}),
+        SparkTestBase.rowsToJava(result));

Review Comment:
  Question: why don't we return (4, d) here?



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.iceberg.ChangelogOperation;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.RowFactory;
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
+import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;
+
+public class ChangelogIterator implements Iterator<Row>, Serializable {
+  private static final String DELETE = ChangelogOperation.DELETE.name();
+  private static final String INSERT = ChangelogOperation.INSERT.name();
+
+  private final Iterator<Row> rowIterator;
+  private final int changeTypeIndex;
+  private final List<Integer> partitionIdx;
+  private final boolean markUpdatedRows;
+
+  private Row cachedRow = null;
+
+  public ChangelogIterator(
+      Iterator<Row> rowIterator,
+      int changeTypeIndex,
+      List<Integer> partitionIdx,
+      boolean markUpdatedRows) {
+    this.rowIterator = rowIterator;
+    this.changeTypeIndex = changeTypeIndex;
+    this.partitionIdx = partitionIdx;
+    this.markUpdatedRows = markUpdatedRows;
+  }
+
+  @Override
+  public boolean hasNext() {
+    if (cachedRow != null) {
+      return true;
+    }
+    return rowIterator.hasNext();
+  }
+
+  @Override
+  public Row next() {
+    // if there is a processed cached row, return it directly
+    if (cachedRow != null
+        && !cachedRow.getString(changeTypeIndex).equals(DELETE)
+        && !cachedRow.getString(changeTypeIndex).equals(INSERT)) {
+      Row row = cachedRow;
+      cachedRow = null;
+      return row;
+    }
+
+    Row currentRow = currentRow();
+
+    if (rowIterator.hasNext()) {
+      GenericRowWithSchema nextRow = (GenericRowWithSchema) rowIterator.next();
+
+      if (withinPartition(currentRow, nextRow)
+          && currentRow.getString(changeTypeIndex).equals(DELETE)
+          && nextRow.getString(changeTypeIndex).equals(INSERT)) {
+
+        GenericInternalRow deletedRow =
+            new GenericInternalRow(((GenericRowWithSchema) currentRow).values());
+        GenericInternalRow insertedRow = new GenericInternalRow(nextRow.values());
+
+        // set the change_type to the same value
+        deletedRow.update(changeTypeIndex, "");
+        insertedRow.update(changeTypeIndex, "");

Review Comment:
  It's a bit hard to read (modifying and then recovering the values). Could we instead do a comparison of deletedRow and insertedRow without the changeTypeIndex?



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.iceberg.ChangelogOperation;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.RowFactory;
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
+import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;
+
+public class ChangelogIterator implements Iterator<Row>, Serializable {
+  private static final String DELETE = ChangelogOperation.DELETE.name();
+  private static final String INSERT = ChangelogOperation.INSERT.name();
+
+  private final Iterator<Row> rowIterator;
+  private final int changeTypeIndex;
+  private final List<Integer> partitionIdx;
+  private final boolean markUpdatedRows;
+
+  private Row cachedRow = null;
+
+  public ChangelogIterator(
+      Iterator<Row> rowIterator,
+      int changeTypeIndex,
+      List<Integer> partitionIdx,
+      boolean markUpdatedRows) {
+    this.rowIterator = rowIterator;
+    this.changeTypeIndex = changeTypeIndex;
+    this.partitionIdx = partitionIdx;
+    this.markUpdatedRows = markUpdatedRows;
+  }
+
+  @Override
+  public boolean hasNext() {
+    if (cachedRow != null) {
+      return true;
+    }
+    return rowIterator.hasNext();
+  }
+
+  @Override
+  public Row next() {
+    // if there is a processed cached row, return it directly
+    if (cachedRow != null
+        && !cachedRow.getString(changeTypeIndex).equals(DELETE)
+        && !cachedRow.getString(changeTypeIndex).equals(INSERT)) {
+      Row row = cachedRow;
+      cachedRow = null;
+      return row;
+    }
+
+    Row currentRow = currentRow();
+
+    if (rowIterator.hasNext()) {
+      GenericRowWithSchema nextRow = (GenericRowWithSchema) rowIterator.next();
+
+      if (withinPartition(currentRow, nextRow)
+          && currentRow.getString(changeTypeIndex).equals(DELETE)
+          && nextRow.getString(changeTypeIndex).equals(INSERT)) {
+
+        GenericInternalRow deletedRow =
+            new GenericInternalRow(((GenericRowWithSchema) currentRow).values());
+        GenericInternalRow insertedRow = new GenericInternalRow(nextRow.values());
+
+        // set the change_type to the same value
+        deletedRow.update(changeTypeIndex, "");
+        insertedRow.update(changeTypeIndex, "");
+
+        if (deletedRow.equals(insertedRow)) {
+          // remove two carry-over rows
+          currentRow = null;
+          this.cachedRow = null;
+        } else if (markUpdatedRows) {
+          // mark the updated rows
+          deletedRow.update(changeTypeIndex, ChangelogOperation.UPDATE_BEFORE.name());
+          currentRow = RowFactory.create(deletedRow.values());
+
+          insertedRow.update(changeTypeIndex, ChangelogOperation.UPDATE_AFTER.name());
+          this.cachedRow = RowFactory.create(insertedRow.values());
+        } else {
+          // recover the values of change type
+          deletedRow.update(changeTypeIndex, DELETE);
+          insertedRow.update(changeTypeIndex, INSERT);
+          this.cachedRow = nextRow;
+        }
+
+      } else {
+        this.cachedRow = nextRow;
+      }
+    }
+
+    return currentRow;
+  }
+
+  private Row currentRow() {
+    if (cachedRow != null) {
+      Row row = cachedRow;
+      cachedRow = null;
+      return row;
+    } else {
+      return rowIterator.next();
+    }
+  }
+
+  private boolean withinPartition(Row currentRow, Row nextRow) {
+    for (int i = 0; i < partitionIdx.size(); i++) {

Review Comment:
  I'm a bit confused here. What rows are these? Do they contain the partition values directly in the row (no need to transform)? And are the partition field indexes always the same for all rows?



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.iceberg.ChangelogOperation;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.RowFactory;
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
+import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;
+
+public class ChangelogIterator implements Iterator<Row>, Serializable {
+  private static final String DELETE = ChangelogOperation.DELETE.name();
+  private static final String INSERT = ChangelogOperation.INSERT.name();
+
+  private final Iterator<Row> rowIterator;
+  private final int changeTypeIndex;
+  private final List<Integer> partitionIdx;
+  private final boolean markUpdatedRows;
+
+  private Row cachedRow = null;
+
+  public ChangelogIterator(
+      Iterator<Row> rowIterator,
+      int changeTypeIndex,
+      List<Integer> partitionIdx,
+      boolean markUpdatedRows) {
+    this.rowIterator = rowIterator;
+    this.changeTypeIndex = changeTypeIndex;
+    this.partitionIdx = partitionIdx;
+    this.markUpdatedRows = markUpdatedRows;
+  }
+
+  @Override
+  public boolean hasNext() {
+    if (cachedRow != null) {
+      return true;
+    }
+    return rowIterator.hasNext();
+  }
+
+  @Override
+  public Row next() {
+    // if there is a processed cached row, return it directly
+    if (cachedRow != null
+        && !cachedRow.getString(changeTypeIndex).equals(DELETE)
+        && !cachedRow.getString(changeTypeIndex).equals(INSERT)) {
+      Row row = cachedRow;
+      cachedRow = null;
+      return row;
+    }
+
+    Row currentRow = currentRow();
+
+    if (rowIterator.hasNext()) {
+      GenericRowWithSchema nextRow = (GenericRowWithSchema) rowIterator.next();
+
+      if (withinPartition(currentRow, nextRow)
+          && currentRow.getString(changeTypeIndex).equals(DELETE)
+          && nextRow.getString(changeTypeIndex).equals(INSERT)) {
+
+        GenericInternalRow deletedRow =
+            new GenericInternalRow(((GenericRowWithSchema) currentRow).values());
+        GenericInternalRow insertedRow = new GenericInternalRow(nextRow.values());
+
+        // set the change_type to the same value
+        deletedRow.update(changeTypeIndex, "");
+        insertedRow.update(changeTypeIndex, "");
+
+        if (deletedRow.equals(insertedRow)) {
+          // remove two carry-over rows

Review Comment:
  The comment is not so clear to me. I'm still a bit confused by the concept of "carry-over"; I can't find where it's defined, but I see it in some places in your other PR. What about "clear cached state"? Unless carry-over has some specific significance.



##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/ChangelogIterator.java:
##########
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark;
+
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.iceberg.ChangelogOperation;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.RowFactory;
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
+import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema;
+
+public class ChangelogIterator implements Iterator<Row>, Serializable {
+  private static final String DELETE = ChangelogOperation.DELETE.name();
+  private static final String INSERT = ChangelogOperation.INSERT.name();
+
+  private final Iterator<Row> rowIterator;
+  private final int changeTypeIndex;
+  private final List<Integer> partitionIdx;
+  private final boolean markUpdatedRows;
+
+  private Row cachedRow = null;
+
+  public ChangelogIterator(
+      Iterator<Row> rowIterator,
+      int changeTypeIndex,
+      List<Integer> partitionIdx,
+      boolean markUpdatedRows) {
+    this.rowIterator = rowIterator;
+    this.changeTypeIndex = changeTypeIndex;
+    this.partitionIdx = partitionIdx;
+    this.markUpdatedRows = markUpdatedRows;
+  }
+
+  @Override
+  public boolean hasNext() {
+    if (cachedRow != null) {
+      return true;
+    }
+    return rowIterator.hasNext();
+  }
+
+  @Override
+  public Row next() {
+    // if there is a processed cached row, return it directly
+    if (cachedRow != null
+        && !cachedRow.getString(changeTypeIndex).equals(DELETE)
+        && !cachedRow.getString(changeTypeIndex).equals(INSERT)) {
+      Row row = cachedRow;
+      cachedRow = null;
+      return row;
+    }
+
+    Row currentRow = currentRow();
+
+    if (rowIterator.hasNext()) {
+      GenericRowWithSchema nextRow = (GenericRowWithSchema) rowIterator.next();
+
+      if (withinPartition(currentRow, nextRow)
+          && currentRow.getString(changeTypeIndex).equals(DELETE)
+          && nextRow.getString(changeTypeIndex).equals(INSERT)) {
+
+        GenericInternalRow deletedRow =
+            new GenericInternalRow(((GenericRowWithSchema) currentRow).values());
+        GenericInternalRow insertedRow = new GenericInternalRow(nextRow.values());
+
+        // set the change_type to the same value
+        deletedRow.update(changeTypeIndex, "");
+        insertedRow.update(changeTypeIndex, "");
+
+        if (deletedRow.equals(insertedRow)) {
+          // remove two carry-over rows
+          currentRow = null;
+          this.cachedRow = null;
+        } else if (markUpdatedRows) {
+          // mark the updated rows
+          deletedRow.update(changeTypeIndex, ChangelogOperation.UPDATE_BEFORE.name());
+          currentRow = RowFactory.create(deletedRow.values());
+
+          insertedRow.update(changeTypeIndex, ChangelogOperation.UPDATE_AFTER.name());
+          this.cachedRow = RowFactory.create(insertedRow.values());
+        } else {
+          // recover the values of change type
+          deletedRow.update(changeTypeIndex, DELETE);

Review Comment:
  Is markUpdated just the underlying iterator? (Wondering if we can short-circuit.) Or what is the difference? Not sure I understand the flag.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org
For additional commands, e-mail: issues-help@iceberg.apache.org