Posted to issues@flink.apache.org by GitBox <gi...@apache.org> on 2022/06/07 07:06:02 UTC

[GitHub] [flink-table-store] JingsongLi opened a new pull request, #146: [FLINK-27927] Improve table store connector common interfaces

JingsongLi opened a new pull request, #146:
URL: https://github.com/apache/flink-table-store/pull/146

   We currently have the initial FileStoreTable-related interfaces, but some pieces are still missing to support the four capabilities connectors need:
   1. Type conversion
   2. Data structure conversion
   3. Filter conversion
   4. Scan and Read
   
   In this JIRA, more easy-to-use interfaces will be added.
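   For context, a minimal, self-contained sketch of how these four capabilities could fit together from a connector's point of view is shown below. All names and signatures in it (`Split`, `TableScan`, `TableRead`, `withFilter`, `plan`, `createReader`) are assumptions made for illustration only, not the interfaces introduced by this PR:
   
   ```java
   import org.apache.flink.table.data.RowData;
   import org.apache.flink.table.expressions.ResolvedExpression;
   
   import java.io.IOException;
   import java.util.Iterator;
   import java.util.List;
   
   // Hypothetical shapes only -- names and signatures are assumptions, not this PR's API.
   interface Split {}
   
   interface TableScan {
       // (3) Filter conversion: push converted predicates into the scan.
       TableScan withFilter(List<ResolvedExpression> filters);
   
       // (4) Scan: discover the splits to read.
       List<Split> plan();
   }
   
   interface TableRead {
       // (4) Read: produce rows for a split; (1) type conversion and (2) data structure
       // conversion happen when mapping these RowData records to the target engine.
       Iterator<RowData> createReader(Split split) throws IOException;
   }
   ```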


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscribe@flink.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [flink-table-store] tsreaper commented on a diff in pull request #146: [FLINK-27927] Improve table store connector common interfaces

Posted by GitBox <gi...@apache.org>.
tsreaper commented on code in PR #146:
URL: https://github.com/apache/flink-table-store/pull/146#discussion_r890909263


##########
flink-table-store-common/src/main/java/org/apache/flink/table/store/utils/RowDataUtils.java:
##########
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.store.utils;
+
+import org.apache.flink.table.data.ArrayData;
+import org.apache.flink.table.data.GenericArrayData;
+import org.apache.flink.table.data.GenericMapData;
+import org.apache.flink.table.data.GenericRowData;
+import org.apache.flink.table.data.MapData;
+import org.apache.flink.table.data.RawValueData;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.StringData;
+import org.apache.flink.table.data.binary.BinaryArrayData;
+import org.apache.flink.table.data.binary.BinaryMapData;
+import org.apache.flink.table.data.binary.BinaryRawValueData;
+import org.apache.flink.table.data.binary.BinaryRowData;
+import org.apache.flink.table.data.binary.BinaryStringData;
+import org.apache.flink.table.data.binary.NestedRowData;
+import org.apache.flink.table.types.logical.ArrayType;
+import org.apache.flink.table.types.logical.DecimalType;
+import org.apache.flink.table.types.logical.IntType;
+import org.apache.flink.table.types.logical.LocalZonedTimestampType;
+import org.apache.flink.table.types.logical.LogicalType;
+import org.apache.flink.table.types.logical.MapType;
+import org.apache.flink.table.types.logical.MultisetType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.table.types.logical.TimestampType;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/** Utils for {@link RowData} structures. */
+public class RowDataUtils {
+
+    public static RowData copyRowData(RowData row, RowType rowType) {
+        if (row instanceof BinaryRowData) {
+            return ((BinaryRowData) row).copy();
+        } else if (row instanceof NestedRowData) {
+            return ((NestedRowData) row).copy();
+        } else {
+            GenericRowData ret = new GenericRowData(row.getArity());
+            ret.setRowKind(row.getRowKind());
+
+            for (int i = 0; i < row.getArity(); ++i) {
+                LogicalType fieldType = rowType.getTypeAt(i);
+                ret.setField(i, copy(get(row, i, fieldType), fieldType));
+            }
+
+            return ret;
+        }
+    }
+
+    public static ArrayData copyArray(ArrayData from, LogicalType eleType) {
+        if (from instanceof BinaryArrayData) {
+            return ((BinaryArrayData) from).copy();
+        }
+
+        if (!eleType.isNullable()) {
+            switch (eleType.getTypeRoot()) {
+                case BOOLEAN:
+                    return new GenericArrayData(from.toBooleanArray());
+                case TINYINT:
+                    return new GenericArrayData(from.toByteArray());
+                case SMALLINT:
+                    return new GenericArrayData(from.toShortArray());
+                case INTEGER:
+                case DATE:
+                case TIME_WITHOUT_TIME_ZONE:
+                    return new GenericArrayData(from.toIntArray());
+                case BIGINT:
+                    return new GenericArrayData(from.toLongArray());
+                case FLOAT:
+                    return new GenericArrayData(from.toFloatArray());
+                case DOUBLE:
+                    return new GenericArrayData(from.toDoubleArray());
+            }
+        }
+
+        Object[] newArray = new Object[from.size()];
+
+        for (int i = 0; i < newArray.length; ++i) {
+            if (!from.isNullAt(i)) {
+                newArray[i] = copy(get(from, i, eleType), eleType);
+            } else {
+                newArray[i] = null;
+            }
+        }
+
+        return new GenericArrayData(newArray);
+    }
+
+    private static MapData copyMap(MapData map, LogicalType keyType, LogicalType valueType) {
+        if (map instanceof BinaryMapData) {
+            return ((BinaryMapData) map).copy();
+        }
+
+        Map<Object, Object> javaMap = new HashMap<>();
+        ArrayData keys = map.keyArray();
+        ArrayData values = map.valueArray();
+        for (int i = 0; i < keys.size(); i++) {
+            javaMap.put(
+                    copy(get(keys, i, keyType), keyType),
+                    copy(get(values, i, valueType), valueType));
+        }
+        return new GenericMapData(javaMap);
+    }
+
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public static Object copy(Object o, LogicalType type) {
+        if (o instanceof StringData) {
+            BinaryStringData string = (BinaryStringData) o;
+            if (string.getBinarySection() != null) {
+                return ((BinaryStringData) o).copy();
+            }
+        } else if (o instanceof RowData) {
+            return copyRowData((RowData) o, (RowType) type);
+        } else if (o instanceof ArrayData) {
+            return copyArray((ArrayData) o, ((ArrayType) type).getElementType());
+        } else if (o instanceof MapData) {
+            if (type instanceof MapType) {
+                return copyMap(
+                        (MapData) o,
+                        ((MapType) type).getKeyType(),
+                        ((MapType) type).getValueType());
+            } else {
+                return copyMap((MapData) o, ((MultisetType) type).getElementType(), new IntType());
+            }
+        } else if (o instanceof RawValueData) {
+            BinaryRawValueData raw = (BinaryRawValueData) o;
+            if (raw.getBinarySection() != null) {
+                return BinaryRawValueData.fromBytes(raw.toBytes(null));
+            }
+        }
+        return o;

Review Comment:
   `BinaryStringData` may be reused. `BigDecimal` in `DecimalData` may also change.



##########
flink-table-store-core/src/main/java/org/apache/flink/table/store/table/AbstractFileStoreTable.java:
##########
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.store.table;
+
+import org.apache.flink.table.store.file.schema.Schema;
+import org.apache.flink.table.types.logical.RowType;
+
+/** Abstract {@link FileStoreTable}. */
+public abstract class AbstractFileStoreTable implements FileStoreTable {
+
+    private static final long serialVersionUID = 1L;
+
+    private final String name;
+    protected final Schema schema;
+
+    public AbstractFileStoreTable(String name, Schema schema) {
+        this.name = name;
+        this.schema = schema;
+    }
+
+    @Override
+    public String name() {
+        return name;

Review Comment:
   Should we also store `name` in `Schema`? This name is actually the name of the table.
   
   If everything is stored in `Schema`, we only need to expose `public Schema schema()` to users; there is no need to add methods for separate members.
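   A minimal sketch of the shape this suggestion implies is shown below, assuming `Schema` also stored the table name; the single-argument constructor and `schema()` accessor are illustrative assumptions, not code from the PR:
   
   ```java
   import org.apache.flink.table.store.file.schema.Schema;
   
   // Hypothetical sketch of the suggestion -- not the code in this PR.
   // If Schema carried everything, including the table name, the abstract table
   // would only need to expose the schema itself.
   public abstract class AbstractFileStoreTable implements FileStoreTable {
   
       private static final long serialVersionUID = 1L;
   
       protected final Schema schema;
   
       public AbstractFileStoreTable(Schema schema) {
           this.schema = schema;
       }
   
       /** Single accessor: callers read the table name, fields, etc. from the schema. */
       public Schema schema() {
           return schema;
       }
   }
   ```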





[GitHub] [flink-table-store] JingsongLi closed pull request #146: [FLINK-27927] Improve table store connector common interfaces

Posted by GitBox <gi...@apache.org>.
JingsongLi closed pull request #146: [FLINK-27927] Improve table store connector common interfaces
URL: https://github.com/apache/flink-table-store/pull/146




[GitHub] [flink-table-store] JingsongLi commented on a diff in pull request #146: [FLINK-27927] Improve table store connector common interfaces

Posted by GitBox <gi...@apache.org>.
JingsongLi commented on code in PR #146:
URL: https://github.com/apache/flink-table-store/pull/146#discussion_r890991997


##########
flink-table-store-common/src/main/java/org/apache/flink/table/store/utils/RowDataUtils.java:
##########

Review Comment:
   - `StringData` is already copied.
   
   Yes, `DecimalData` needs to be copied.
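   A minimal sketch of one way such a copy could look is shown below, using Flink's `DecimalData` API (`toBigDecimal()`, `precision()`, `scale()`, `fromBigDecimal(...)`); the helper class and method name are illustrative assumptions, not the actual change made in this PR:
   
   ```java
   import org.apache.flink.table.data.DecimalData;
   
   // Illustrative helper only -- not the actual change in this PR.
   final class DecimalCopyExample {
   
       // Rebuilds a new DecimalData from the current value, so that later reuse of
       // the original instance does not affect the copy.
       static DecimalData copyDecimal(DecimalData dec) {
           return DecimalData.fromBigDecimal(dec.toBigDecimal(), dec.precision(), dec.scale());
       }
   }
   ```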





[GitHub] [flink-table-store] JingsongLi commented on a diff in pull request #146: [FLINK-27927] Improve table store connector common interfaces

Posted by GitBox <gi...@apache.org>.
JingsongLi commented on code in PR #146:
URL: https://github.com/apache/flink-table-store/pull/146#discussion_r890990408


##########
flink-table-store-core/src/main/java/org/apache/flink/table/store/table/AbstractFileStoreTable.java:
##########

Review Comment:
   I'm a little hesitant about this one:
   1. It would tie the schema to the name, so renaming a table could become troublesome in the future because the stored schema would also need to be modified.
   2. The name is already saved in the catalog, so saving another copy in the schema is a bit redundant.





[GitHub] [flink-table-store] JingsongLi commented on a diff in pull request #146: [FLINK-27927] Improve table store connector common interfaces

Posted by GitBox <gi...@apache.org>.
JingsongLi commented on code in PR #146:
URL: https://github.com/apache/flink-table-store/pull/146#discussion_r890991997


##########
flink-table-store-common/src/main/java/org/apache/flink/table/store/utils/RowDataUtils.java:
##########

Review Comment:
   yes


