Posted to commits@hudi.apache.org by GitBox <gi...@apache.org> on 2019/10/24 18:36:56 UTC

[GitHub] [incubator-hudi] taherk77 commented on a change in pull request #969: [HUDI-251] JDBC incremental load to HUDI DeltaStreamer

taherk77 commented on a change in pull request #969: [HUDI-251] JDBC incremental load to HUDI DeltaStreamer
URL: https://github.com/apache/incubator-hudi/pull/969#discussion_r338730094
 
 

 ##########
 File path: hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JDBCSource.java
 ##########
 @@ -0,0 +1,235 @@
+package org.apache.hudi.utilities.sources;
+
+import java.util.Arrays;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hudi.DataSourceUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.TypedProperties;
+import org.apache.hudi.common.util.collection.Pair;
+import org.apache.hudi.exception.HoodieException;
+import org.apache.hudi.utilities.schema.SchemaProvider;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.sql.Column;
+import org.apache.spark.sql.DataFrameReader;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.functions;
+import org.apache.spark.sql.types.DataTypes;
+import org.jetbrains.annotations.NotNull;
+
+
+public class JDBCSource extends RowSource {
+
+  private static final Logger LOG = LogManager.getLogger(JDBCSource.class);
+
+  public JDBCSource(TypedProperties props, JavaSparkContext sparkContext, SparkSession sparkSession,
+      SchemaProvider schemaProvider) {
+    super(props, sparkContext, sparkSession, schemaProvider);
+  }
+
+  private static DataFrameReader validatePropsAndGetDataFrameReader(final SparkSession session,
+      final TypedProperties properties)
+      throws HoodieException {
+    DataFrameReader dataFrameReader = null;
+    FSDataInputStream passwordFileStream = null;
+    try {
+      dataFrameReader = session.read().format("jdbc");
+      dataFrameReader = dataFrameReader.option(Config.URL_PROP, properties.getString(Config.URL));
+      dataFrameReader = dataFrameReader.option(Config.USER_PROP, properties.getString(Config.USER));
+      dataFrameReader = dataFrameReader.option(Config.DRIVER_PROP, properties.getString(Config.DRIVER_CLASS));
+      dataFrameReader = dataFrameReader
+          .option(Config.RDBMS_TABLE_PROP, properties.getString(Config.RDBMS_TABLE_NAME));
+
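+      // Resolve the JDBC password: an inline password property takes precedence
+      // over a password file, and one of the two must be supplied.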
+      if (properties.containsKey(Config.PASSWORD) && !StringUtils
+          .isNullOrEmpty(properties.getString(Config.PASSWORD))) {
+        LOG.info("Reading JDBC password from properties file....");
+        dataFrameReader = dataFrameReader.option(Config.PASSWORD_PROP, properties.getString(Config.PASSWORD));
+      } else if (properties.containsKey(Config.PASSWORD_FILE) && !StringUtils
+          .isNullOrEmpty(properties.getString(Config.PASSWORD_FILE))) {
+        LOG.info(
+            String.format("Reading JDBC password from password file %s", properties.getString(Config.PASSWORD_FILE)));
+        FileSystem fileSystem = FileSystem.get(new Configuration());
+        Path passwordFilePath = new Path(properties.getString(Config.PASSWORD_FILE));
+        passwordFileStream = fileSystem.open(passwordFilePath);
+        // Size the buffer from the file length and read it fully; available() and a
+        // single read() are not guaranteed to cover the whole file.
+        byte[] bytes = new byte[(int) fileSystem.getFileStatus(passwordFilePath).getLen()];
+        passwordFileStream.readFully(bytes);
+        dataFrameReader = dataFrameReader.option(Config.PASSWORD_PROP, new String(bytes));
+      } else {
+        throw new IllegalArgumentException(String.format("JDBCSource needs either a %s or %s to connect to RDBMS "
+            + "datasource", Config.PASSWORD_FILE, Config.PASSWORD));
+      }
+
+      addExtraJdbcOptions(properties, dataFrameReader);
+
+      // When incremental mode is enabled, the incremental column is mandatory.
+      if (properties.containsKey(Config.IS_INCREMENTAL) && !StringUtils
+          .isNullOrEmpty(properties.getString(Config.IS_INCREMENTAL))) {
+        DataSourceUtils.checkRequiredProperties(properties, Arrays.asList(Config.INCREMENTAL_COLUMN));
+      }
+      return dataFrameReader;
+    } catch (Exception e) {
+      throw new HoodieException(e);
+    } finally {
+      IOUtils.closeStream(passwordFileStream);
+    }
+  }
+
+  private static void addExtraJdbcOptions(TypedProperties properties, DataFrameReader dataFrameReader) {
+    Set<Object> objects = properties.keySet();
+    for (Object property : objects) {
+      String prop = (String) property;
+      if (prop.startsWith(Config.EXTRA_OPTIONS)) {
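+        // Keep only the last dot-separated segment as the JDBC option name;
+        // this assumes option names themselves contain no dots.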
+        String[] split = prop.split("\\.");
+        String key = split[split.length - 1];
+        String value = properties.getString(prop);
+        LOG.info(String.format("Adding %s -> %s to jdbc options", key, value));
+        dataFrameReader.option(key, value);
 
 Review comment:
   I don't think the option key will ever be of the form `a.b`; it will always be a single token, i.e. `a = value`.
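
   For context, a minimal, self-contained sketch of the prefix-stripping logic under discussion. The `hoodie.datasource.jdbc.extra.options.` prefix here is a hypothetical stand-in for the actual value of Config.EXTRA_OPTIONS, which is not shown in this hunk:

       import java.util.LinkedHashMap;
       import java.util.Map;

       public class ExtraOptionsDemo {

         // Hypothetical prefix; the real Config.EXTRA_OPTIONS constant is defined elsewhere in the PR.
         private static final String EXTRA_OPTIONS = "hoodie.datasource.jdbc.extra.options.";

         public static void main(String[] args) {
           Map<String, String> props = new LinkedHashMap<>();
           props.put(EXTRA_OPTIONS + "fetchsize", "1000");
           props.put(EXTRA_OPTIONS + "queryTimeout", "30");

           for (Map.Entry<String, String> entry : props.entrySet()) {
             String prop = entry.getKey();
             if (prop.startsWith(EXTRA_OPTIONS)) {
               // Keeping only the last dot-separated segment yields the raw JDBC
               // option name, e.g. "fetchsize" -> "1000".
               String[] split = prop.split("\\.");
               String key = split[split.length - 1];
               System.out.println(key + " -> " + entry.getValue());
             }
           }
         }
       }

   As the comment above notes, this only holds while option names themselves contain no dots; a key such as `a.b` would be silently truncated to `b`.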

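The quoted hunk is truncated before the actual fetch logic, so as a purely illustrative sketch (the names and signature below are hypothetical, not the PR's API), an incremental pull on top of the reader built above could look like:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.functions;

    final class IncrementalPullSketch {

      // Illustrative only: keep rows whose incremental column is strictly greater
      // than the last checkpointed value, i.e. the Dataset equivalent of
      //   SELECT * FROM source_table WHERE incr_col > '<last checkpoint>'
      // (assumes the checkpoint value is comparable in the column's type).
      static Dataset<Row> incrementalPull(Dataset<Row> sourceTable, String incrementalColumn,
          String lastCheckpoint) {
        return sourceTable.filter(functions.col(incrementalColumn).gt(functions.lit(lastCheckpoint)));
      }
    }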