Posted to commits@hudi.apache.org by GitBox <gi...@apache.org> on 2020/05/06 13:19:00 UTC

[GitHub] [incubator-hudi] yanghua commented on a change in pull request #1574: [HUDI-701]Add unit test for HDFSParquetImportCommand

yanghua commented on a change in pull request #1574:
URL: https://github.com/apache/incubator-hudi/pull/1574#discussion_r420772541



##########
File path: hudi-cli/src/test/java/org/apache/hudi/cli/integ/ITTestHDFSParquetImportCommand.java
##########
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.cli.integ;
+
+import org.apache.avro.generic.GenericRecord;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hudi.cli.AbstractShellIntegrationTest;
+import org.apache.hudi.cli.HoodieCLI;
+import org.apache.hudi.cli.commands.TableCommand;
+import org.apache.hudi.common.HoodieClientTestUtils;
+import org.apache.hudi.common.HoodieTestDataGenerator;
+import org.apache.hudi.common.model.HoodieTableType;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
+import org.apache.hudi.utilities.HDFSParquetImporter;
+import org.apache.hudi.utilities.TestHDFSParquetImporter;
+import org.apache.hudi.utilities.TestHDFSParquetImporter.HoodieTripModel;
+
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.springframework.shell.core.CommandResult;
+
+import java.io.File;
+import java.io.IOException;
+import java.text.ParseException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Test class for {@link org.apache.hudi.cli.commands.HDFSParquetImportCommand}.
+ */
+public class ITTestHDFSParquetImportCommand extends AbstractShellIntegrationTest {
+
+  private Path sourcePath;
+  private Path targetPath;
+  private String tableName;
+  private String schemaFile;
+  private String tablePath;
+
+  private List<GenericRecord> insertData;
+  private TestHDFSParquetImporter importer;
+
+  @BeforeEach
+  public void init() throws IOException, ParseException {
+    tableName = "test_table";
+    tablePath = basePath + File.separator + tableName;
+    sourcePath = new Path(basePath, "source");
+    targetPath = new Path(tablePath);
+    schemaFile = new Path(basePath, "file.schema").toString();
+
+    // create schema file
+    try (FSDataOutputStream schemaFileOS = fs.create(new Path(schemaFile))) {
+      schemaFileOS.write(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA.getBytes());
+    }
+
+    importer = new TestHDFSParquetImporter();
+    insertData = importer.createInsertRecords(sourcePath);
+  }
+
+  /**
+   * Test case for 'hdfsparquetimport' with insert.
+   */
+  @Test
+  public void testConvertWithInsert() throws IOException {
+    String command = String.format("hdfsparquetimport --srcPath %s --targetPath %s --tableName %s "
+        + "--tableType %s --rowKeyField %s" + " --partitionPathField %s --parallelism %s "
+        + "--schemaFilePath %s --format %s --sparkMemory %s --retry %s --sparkMaster %s",
+        sourcePath.toString(), targetPath.toString(), tableName, HoodieTableType.COPY_ON_WRITE.name(),
+        "_row_key", "timestamp", "1", schemaFile, "parquet", "2G", "1", "local");
+    CommandResult cr = getShell().executeCommand(command);
+
+    assertAll("Command runs successfully",
+        () -> assertTrue(cr.isSuccess()),
+        () -> assertEquals("Table imported to hoodie format", cr.getResult().toString()));
+
+    // Check that the hudi table exists
+    String metaPath = targetPath + File.separator + HoodieTableMetaClient.METAFOLDER_NAME;
+    assertTrue(new File(metaPath).exists(), "Hoodie table does not exist.");

Review comment:
       use `Files.exists(xx)`?
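   
   A minimal sketch of that suggestion, reusing `metaPath` from the test above (untested, illustrative only):
   
   ```java
   import java.nio.file.Files;
   import java.nio.file.Paths;
   
   // java.nio.file.Files checks existence from a path string directly,
   // without constructing an intermediate java.io.File.
   assertTrue(Files.exists(Paths.get(metaPath)), "Hoodie table does not exist.");
   ```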

##########
File path: hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java
##########
@@ -57,6 +56,7 @@ public String convert(
       @CliOption(key = "schemaFilePath", mandatory = true,
           help = "Path for Avro schema file") final String schemaFilePath,
       @CliOption(key = "format", mandatory = true, help = "Format for the input data") final String format,
+      @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master ") String master,

Review comment:
       `"Spark Master "` -> `"Spark Master"` (remove the trailing space)

##########
File path: hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java
##########
@@ -82,17 +82,17 @@ public static void main(String[] args) throws Exception {
         break;
       case IMPORT:
       case UPSERT:
-        assert (args.length >= 12);
+        assert (args.length >= 13);
         String propsFilePath = null;
-        if (!StringUtils.isNullOrEmpty(args[11])) {
-          propsFilePath = args[11];
+        if (!StringUtils.isNullOrEmpty(args[12])) {
+          propsFilePath = args[12];
         }
         List<String> configs = new ArrayList<>();
-        if (args.length > 12) {
-          configs.addAll(Arrays.asList(args).subList(12, args.length));
+        if (args.length > 13) {
+          configs.addAll(Arrays.asList(args).subList(13, args.length));
         }
-        returnCode = dataLoad(jsc, command, args[1], args[2], args[3], args[4], args[5], args[6],
-            Integer.parseInt(args[7]), args[8], args[9], Integer.parseInt(args[10]), propsFilePath, configs);
+        returnCode = dataLoad(jsc, command, args[3], args[4], args[5], args[6], args[7], args[8],

Review comment:
       **Please note that I am not criticizing you personally here.** But these magic array indexes are ugly and hard to read; we should think of a way to improve them.
   
   When parsing the parameters, it would be best to:
   1) Bind each parameter to an appropriately named variable, to improve the readability of the code (see the sketch below);
   2) Refactor the parsing so that it is order-independent rather than positional;
   
   WDYT? @hddong @vinothchandar
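   
   A minimal sketch of suggestion (1); the variable names below are illustrative guesses, not the PR's actual argument mapping:
   
   ```java
   // Bind each positional argument to a named variable once, so the
   // dataLoad call reads as words instead of magic indexes.
   String master = args[1];
   String sparkMemory = args[2];
   String srcPath = args[3];
   String targetPath = args[4];
   String tableName = args[5];
   String tableType = args[6];
   String rowKeyField = args[7];
   String partitionPathField = args[8];
   int parallelism = Integer.parseInt(args[9]);
   String schemaFilePath = args[10];
   int retry = Integer.parseInt(args[11]);
   ```
   
   For suggestion (2), an order-independent parser (for example Apache Commons CLI) would remove the positional coupling entirely.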

##########
File path: hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java
##########
@@ -78,8 +76,8 @@ public String convert(
       cmd = SparkCommand.UPSERT.toString();
     }
 
-    sparkLauncher.addAppArgs(cmd, srcPath, targetPath, tableName, tableType, rowKeyField, partitionPathField,
-        parallelism, schemaFilePath, sparkMemory, retry, propsFilePath);
+    sparkLauncher.addAppArgs(cmd, master, sparkMemory, srcPath, targetPath, tableName, tableType, rowKeyField,

Review comment:
       As a refactoring suggestion, it would be better to define a data structure to hold the CLI args, so that adding a new option does not keep changing the method signatures (see the sketch below).
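   
   A hypothetical sketch of that idea (`HDFSImportArgs` is an illustrative name, not existing Hudi code). With a holder object, adding an option such as `--sparkMaster` touches only this class instead of every method signature in the call chain:
   
   ```java
   // Hypothetical value object carrying the import CLI arguments.
   public final class HDFSImportArgs {
     public final String master;
     public final String sparkMemory;
     public final String srcPath;
     public final String targetPath;
     public final String tableName;
     public final String tableType;
   
     public HDFSImportArgs(String master, String sparkMemory, String srcPath,
         String targetPath, String tableName, String tableType) {
       this.master = master;
       this.sparkMemory = sparkMemory;
       this.srcPath = srcPath;
       this.targetPath = targetPath;
       this.tableName = tableName;
       this.tableType = tableType;
     }
   }
   ```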




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org