Posted to issues@iceberg.apache.org by GitBox <gi...@apache.org> on 2021/07/05 07:20:21 UTC

[GitHub] [iceberg] szehon-ho commented on issue #2783: Metadata Table Empty Projection -Unknown type for int field. Type name: java.lang.String

szehon-ho commented on issue #2783:
URL: https://github.com/apache/iceberg/issues/2783#issuecomment-873869272


   Adding a reproduction test (it can be run as a spark3-extensions test case, for example):
   ```
   package org.apache.iceberg.spark.extensions;
   
    import org.apache.iceberg.relocated.com.google.common.collect.Lists;
   import org.apache.iceberg.spark.SparkCatalog;
   import org.apache.iceberg.spark.source.SimpleRecord;
   import org.apache.iceberg.spark.source.SparkTable;
   import org.apache.spark.sql.Column;
   import org.apache.spark.sql.Dataset;
   import org.apache.spark.sql.Encoders;
   import org.apache.spark.sql.Row;
   import org.apache.spark.sql.connector.catalog.Identifier;
   import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation;
   import org.junit.After;
   import org.junit.Assert;
   import org.junit.Test;
   import scala.Some;
   
   import java.util.Arrays;
   import java.util.List;
   import java.util.Map;
   
   public class TestSparkMetadataTable extends SparkExtensionsTestBase {
   
     public TestSparkMetadataTable(String catalogName, String implementation, Map<String, String> config) {
       super(catalogName, implementation, config);
     }
   
     @Test
     public void testCountEntriesPartitionedTable() throws Exception {
       // init load
       List<SimpleRecord> records = Lists.newArrayList(new SimpleRecord(1, "1"));
       Dataset<Row> inputDf = spark.createDataFrame(records, SimpleRecord.class);
        inputDf.writeTo(tableName).create();

        // single-column dataframe containing a file path that does not exist in the table
        Dataset<Row> stringDs = spark.createDataset(Arrays.asList("my_path"), Encoders.STRING())
            .toDF("file_path");
   
        // load the "entries" metadata table for this table through the Iceberg SparkCatalog
        SparkCatalog catalog = (SparkCatalog) spark.sessionState().catalogManager().catalog(catalogName);
        String[] tableIdentifiers = tableName.split("\\.");
        Identifier metaId = Identifier.of(
            new String[]{tableIdentifiers[1], tableIdentifiers[2]}, "entries");
        SparkTable metaTable = catalog.loadTable(metaId);
        Dataset<Row> entriesDs = Dataset.ofRows(spark,
            DataSourceV2Relation.create(metaTable, Some.apply(catalog), Some.apply(metaId)));
   
        // join on file_path: "my_path" matches nothing, so the result should be empty;
        // isEmpty() projects no columns from the metadata table, which triggers the reported error
        Column joinCond = entriesDs.col("data_file.file_path").equalTo(stringDs.col("file_path"));
        Dataset<Row> res = entriesDs.join(stringDs, joinCond);
        Assert.assertTrue(res.isEmpty());
     }
   
     @After
     public void dropTables() {
       sql("DROP TABLE IF EXISTS %s", tableName);
     }
   }
   ```
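
   A possibly simpler way to load the same "entries" metadata table is by its qualified name through the session catalog. This is only a sketch: I have not verified that it exercises the same empty-projection code path as the DataSourceV2Relation approach above, and it assumes tableName resolves through a configured Iceberg catalog.
    ```
    // sketch (assumption): read the entries metadata table by name instead of
    // building a DataSourceV2Relation by hand; reuses stringDs from the test above
    Dataset<Row> entriesByName = spark.table(tableName + ".entries");
    Column cond = entriesByName.col("data_file.file_path").equalTo(stringDs.col("file_path"));
    // isEmpty() is what forces the empty projection over the metadata table
    Assert.assertTrue(entriesByName.join(stringDs, cond).isEmpty());
    ```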




