You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iceberg.apache.org by pv...@apache.org on 2021/04/12 07:50:21 UTC
[iceberg] branch master updated: Hive: Capitalize write format value when creating writer (#2449)
This is an automated email from the ASF dual-hosted git repository.
pvary pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git
The following commit(s) were added to refs/heads/master by this push:
new 93fb7ce Hive: Capitalize write format value when creating writer (#2449)
93fb7ce is described below
commit 93fb7ce28e9e809c412910f72d1ace6312de93e5
Author: Marton Bod <ma...@gmail.com>
AuthorDate: Mon Apr 12 09:50:12 2021 +0200
Hive: Capitalize write format value when creating writer (#2449)
---
.../iceberg/mr/hive/HiveIcebergOutputFormat.java | 3 ++-
.../TestHiveIcebergStorageHandlerWithEngine.java | 24 ++++++++++++++++++++++
2 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java b/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java
index 080c015..9a3c54a 100644
--- a/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java
+++ b/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java
@@ -19,6 +19,7 @@
package org.apache.iceberg.mr.hive;
+import java.util.Locale;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -73,7 +74,7 @@ public class HiveIcebergOutputFormat<T> implements OutputFormat<NullWritable, Co
Schema schema = HiveIcebergStorageHandler.schema(jc);
PartitionSpec spec = table.spec();
FileFormat fileFormat = FileFormat.valueOf(PropertyUtil.propertyAsString(table.properties(),
- TableProperties.DEFAULT_FILE_FORMAT, TableProperties.DEFAULT_FILE_FORMAT_DEFAULT));
+ TableProperties.DEFAULT_FILE_FORMAT, TableProperties.DEFAULT_FILE_FORMAT_DEFAULT).toUpperCase(Locale.ENGLISH));
long targetFileSize = PropertyUtil.propertyAsLong(table.properties(), TableProperties.WRITE_TARGET_FILE_SIZE_BYTES,
TableProperties.WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT);
FileIO io = table.io();
diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java
index 41aef8b..c55f731 100644
--- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java
+++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java
@@ -30,9 +30,11 @@ import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.hive.HiveSchemaUtil;
+import org.apache.iceberg.mr.Catalogs;
import org.apache.iceberg.mr.TestHelper;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
@@ -662,6 +664,28 @@ public class TestHiveIcebergStorageHandlerWithEngine {
HiveIcebergTestUtils.validateData(target2, target2Records, 1);
}
+ @Test
+ public void testWriteWithDefaultWriteFormat() {
+ Assume.assumeTrue("Testing the default file format is enough for a single scenario.",
+ executionEngine.equals("mr") && Catalogs.hiveCatalog(shell.getHiveConf()) && fileFormat == FileFormat.ORC);
+
+ TableIdentifier identifier = TableIdentifier.of("default", "customers");
+
+ // create Iceberg table without specifying a write format in the tbl properties
+ // it should fall back to using the default file format
+ shell.executeStatement(String.format("CREATE EXTERNAL TABLE %s (id bigint, name string) STORED BY '%s' %s",
+ identifier,
+ HiveIcebergStorageHandler.class.getName(),
+ testTables.locationForCreateTableSQL(identifier)));
+
+ shell.executeStatement(String.format("INSERT INTO %s VALUES (10, 'Linda')", identifier));
+ List<Object[]> results = shell.executeStatement(String.format("SELECT * FROM %s", identifier));
+
+ Assert.assertEquals(1, results.size());
+ Assert.assertEquals(10L, results.get(0)[0]);
+ Assert.assertEquals("Linda", results.get(0)[1]);
+ }
+
private void testComplexTypeWrite(Schema schema, List<Record> records) throws IOException {
String tableName = "complex_table";
Table table = testTables.createTable(shell, "complex_table", schema, fileFormat, ImmutableList.of());