Posted to commits@seatunnel.apache.org by GitBox <gi...@apache.org> on 2022/02/16 06:14:41 UTC

[GitHub] [incubator-seatunnel] leo65535 commented on a change in pull request #1254: [Feature] [connector] Support flink jdbc sink based on new flink version

leo65535 commented on a change in pull request #1254:
URL: https://github.com/apache/incubator-seatunnel/pull/1254#discussion_r807575891



##########
File path: seatunnel-connectors/seatunnel-connector-flink-jdbc/src/main/java/org/apache/seatunnel/flink/sink/JdbcSink.java
##########
@@ -81,34 +85,24 @@ public void prepare(FlinkEnvironment env) {
     @Nullable
     public DataStreamSink<Row> outputStream(FlinkEnvironment env, DataStream<Row> dataStream) {
         Table table = env.getStreamTableEnvironment().fromDataStream(dataStream);
-        createSink(env.getStreamTableEnvironment(), table);
-        return null;
-    }
+        TypeInformation<?>[] fieldTypes = table.getSchema().getFieldTypes();
 
-    @Override
-    @Nullable
-    public DataSink<Row> outputBatch(FlinkEnvironment env, DataSet<Row> dataSet) {
-        final Table table = env.getBatchTableEnvironment().fromDataSet(dataSet);
-        createSink(env.getBatchTableEnvironment(), table);
+        int[] types = Arrays.stream(fieldTypes).mapToInt(JdbcTypeUtil::typeInformationToSqlType).toArray();
+        SinkFunction<Row> sink = org.apache.flink.connector.jdbc.JdbcSink.sink(
+            query,
+            (st, row) -> JdbcUtils.setRecordToStatement(st, types, row),
+            JdbcExecutionOptions.builder()
+                .withBatchSize(batchSize)
+                .build(),
+            new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
+                .withUrl(dbUrl)
+                .withDriverName(driverName)
+                .withUsername(username)
+                .withPassword(password)
+                .build());
+
+        dataStream.addSink(sink);

Review comment:
      Makes sense, thanks
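
For readers following the API change: below is a minimal, self-contained sketch of the org.apache.flink.connector.jdbc.JdbcSink API (from flink-connector-jdbc) that this hunk adopts, built from the same pieces the PR uses (JdbcSink.sink, JdbcExecutionOptions, JdbcConnectionOptions). The table name, columns, JDBC URL, driver, and credentials are hypothetical placeholders, not values from this PR.

    // Minimal sketch of Flink's JDBC sink API (flink-connector-jdbc).
    // All concrete names below (table "users", URL, credentials) are hypothetical.
    import java.util.Arrays;

    import org.apache.flink.api.common.typeinfo.Types;
    import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
    import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
    import org.apache.flink.connector.jdbc.JdbcSink;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.types.Row;

    public class JdbcSinkSketch {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            // Two sample rows of (id INT, name VARCHAR), with explicit type info.
            DataStream<Row> rows = env.fromCollection(
                Arrays.asList(Row.of(1, "alice"), Row.of(2, "bob")),
                Types.ROW(Types.INT, Types.STRING));

            rows.addSink(JdbcSink.sink(
                // INSERT statement whose placeholders match the Row arity.
                "INSERT INTO users (id, name) VALUES (?, ?)",
                // JdbcStatementBuilder: copy each Row field into the PreparedStatement;
                // the PR generalizes this step via JdbcUtils.setRecordToStatement.
                (statement, row) -> {
                    statement.setInt(1, (Integer) row.getField(0));
                    statement.setString(2, (String) row.getField(1));
                },
                // Buffer and flush 100 records per batch (the PR's `batchSize` knob).
                JdbcExecutionOptions.builder()
                    .withBatchSize(100)
                    .build(),
                // Connection settings; these mirror dbUrl/driverName/username/password.
                new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                    .withUrl("jdbc:mysql://localhost:3306/test")
                    .withDriverName("com.mysql.cj.jdbc.Driver")
                    .withUsername("user")
                    .withPassword("password")
                    .build()));

            env.execute("jdbc-sink-sketch");
        }
    }

As in the hunk above, JdbcSink.sink(...) returns a SinkFunction<Row> that the caller attaches with dataStream.addSink(...); the per-record work lives in the JdbcStatementBuilder lambda, which the PR delegates to JdbcUtils.setRecordToStatement using SQL types derived via JdbcTypeUtil.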



