You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@inlong.apache.org by he...@apache.org on 2022/05/22 04:43:44 UTC
[incubator-inlong] branch master updated: [INLONG-4282][Sort] Optimize the sort package structure (#4290)
This is an automated email from the ASF dual-hosted git repository.
healchow pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-inlong.git
The following commit(s) were added to refs/heads/master by this push:
new 000716ef3 [INLONG-4282][Sort] Optimize the sort package structure (#4290)
000716ef3 is described below
commit 000716ef35521628de2c6acfa35fe5701cba5049
Author: yunqingmoswu <44...@users.noreply.github.com>
AuthorDate: Sun May 22 12:43:39 2022 +0800
[INLONG-4282][Sort] Optimize the sort package structure (#4290)
---
LICENSE | 236 +++----
inlong-sort/pom.xml | 5 +-
.../protocol/node/load/ClickHouseLoadNode.java | 2 +-
inlong-sort/sort-connectors/hive/pom.xml | 89 +++
.../inlong/sort}/hive/HadoopFileSystemFactory.java | 7 +-
.../org/apache/inlong/sort}/hive/HiveOptions.java | 6 +-
.../sort}/hive/HiveRowDataPartitionComputer.java | 7 +-
.../sort}/hive/HiveRowPartitionComputer.java | 5 +-
.../sort}/hive/HiveTableMetaStoreFactory.java | 17 +-
.../apache/inlong/sort}/hive/HiveTableSink.java | 44 +-
.../apache/inlong/sort}/hive/HiveValidator.java | 2 +-
.../sort/hive/table}/HiveTableInlongFactory.java | 31 +-
.../org.apache.flink.table.factories.Factory | 4 +-
inlong-sort/sort-connectors/jdbc/pom.xml | 57 ++
.../jdbc/clickhouse}/ClickHouseRowConverter.java | 78 +--
.../sort/jdbc/dialect}/ClickHouseDialect.java | 245 +++----
.../sort}/jdbc/table/AbstractJdbcDialect.java | 206 +++---
.../inlong/sort}/jdbc/table/JdbcDialects.java | 122 ++--
.../sort}/jdbc/table/JdbcDynamicTableFactory.java | 767 ++++++++++-----------
.../org.apache.flink.table.factories.Factory | 34 +-
inlong-sort/sort-connectors/kafka/pom.xml | 35 +
.../kafka/DynamicKafkaSerializationSchema.java | 30 +-
.../inlong/sort}/kafka/KafkaDynamicSink.java | 91 +--
.../kafka/table}/KafkaDynamicTableFactory.java | 158 ++---
.../org.apache.flink.table.factories.Factory | 34 +-
inlong-sort/sort-connectors/mysql-cdc/pom.xml | 66 ++
.../debezium/DebeziumDeserializationSchema.java | 5 +-
.../sort}/cdc/debezium/DebeziumSourceFunction.java | 132 ++--
.../JsonDebeziumDeserializationSchema.java | 7 +-
.../StringDebeziumDeserializationSchema.java | 2 +-
.../inlong/sort}/cdc/debezium/Validator.java | 2 +-
.../history/FlinkJsonTableChangeSerializer.java | 3 +-
.../debezium/internal/DebeziumChangeConsumer.java | 7 +-
.../debezium/internal/DebeziumChangeFetcher.java | 34 +-
.../cdc/debezium/internal/DebeziumOffset.java | 5 +-
.../internal/DebeziumOffsetSerializer.java | 5 +-
.../internal/EmbeddedEngineChangeEvent.java | 2 +-
.../debezium/internal/FlinkDatabaseHistory.java | 45 +-
.../internal/FlinkDatabaseSchemaHistory.java | 45 +-
.../debezium/internal/FlinkOffsetBackingStore.java | 29 +-
.../sort}/cdc/debezium/internal/Handover.java | 17 +-
.../sort}/cdc/debezium/internal/SchemaRecord.java | 5 +-
.../debezium/table/AppendMetadataCollector.java | 5 +-
.../sort}/cdc/debezium/table/DebeziumOptions.java | 2 +-
.../table/DeserializationRuntimeConverter.java | 5 +-
.../DeserializationRuntimeConverterFactory.java | 5 +-
.../cdc/debezium/table/MetadataConverter.java | 7 +-
.../table/RowDataDebeziumDeserializeSchema.java | 330 ++++-----
.../cdc/debezium/utils/DatabaseHistoryUtil.java | 28 +-
.../cdc/debezium/utils/TemporalConversions.java | 2 +-
.../apache/inlong/sort}/cdc/mysql/MySqlSource.java | 51 +-
.../inlong/sort}/cdc/mysql/MySqlValidator.java | 27 +-
.../cdc/mysql/SeekBinlogToTimestampFilter.java | 9 +-
.../sort}/cdc/mysql/debezium/DebeziumUtils.java | 19 +-
.../debezium/EmbeddedFlinkDatabaseHistory.java | 30 +-
.../debezium/dispatcher/EventDispatcherImpl.java | 26 +-
.../debezium/dispatcher/SignalEventDispatcher.java | 17 +-
.../mysql/debezium/reader/BinlogSplitReader.java | 63 +-
.../cdc/mysql/debezium/reader/DebeziumReader.java | 4 +-
.../mysql/debezium/reader/SnapshotSplitReader.java | 53 +-
.../debezium/task/MySqlBinlogSplitReadTask.java | 22 +-
.../debezium/task/MySqlSnapshotSplitReadTask.java | 123 ++--
.../debezium/task/context/MySqlErrorHandler.java | 7 +-
.../task/context/MySqlTaskContextImpl.java | 2 +-
.../debezium/task/context/StatefulTaskContext.java | 141 ++--
.../cdc/mysql/schema/MySqlFieldDefinition.java | 18 +-
.../inlong/sort}/cdc/mysql/schema/MySqlSchema.java | 19 +-
.../cdc/mysql/schema/MySqlTableDefinition.java | 15 +-
.../sort}/cdc/mysql/schema/MySqlTypeUtils.java | 2 +-
.../inlong/sort}/cdc/mysql/source/MySqlSource.java | 71 +-
.../sort}/cdc/mysql/source/MySqlSourceBuilder.java | 61 +-
.../cdc/mysql/source/assigners/AssignerStatus.java | 6 +-
.../cdc/mysql/source/assigners/ChunkRange.java | 8 +-
.../cdc/mysql/source/assigners/ChunkSplitter.java | 133 ++--
.../source/assigners/MySqlBinlogSplitAssigner.java | 35 +-
.../source/assigners/MySqlHybridSplitAssigner.java | 35 +-
.../assigners/MySqlSnapshotSplitAssigner.java | 74 +-
.../mysql/source/assigners/MySqlSplitAssigner.java | 21 +-
.../assigners/state/BinlogPendingSplitsState.java | 2 +-
.../assigners/state/HybridPendingSplitsState.java | 2 +-
.../source/assigners/state/PendingSplitsState.java | 2 +-
.../state/PendingSplitsStateSerializer.java | 23 +-
.../state/SnapshotPendingSplitsState.java | 35 +-
.../cdc/mysql/source/config/MySqlSourceConfig.java | 20 +-
.../source/config/MySqlSourceConfigFactory.java | 84 ++-
.../mysql/source/config/MySqlSourceOptions.java | 15 +-
.../cdc/mysql/source/config/ServerIdRange.java | 8 +-
.../mysql/source/connection/ConnectionPoolId.java | 2 +-
.../mysql/source/connection/ConnectionPools.java | 8 +-
.../source/connection/JdbcConnectionFactory.java | 13 +-
.../source/connection/JdbcConnectionPools.java | 17 +-
.../source/connection/PooledDataSourceFactory.java | 9 +-
.../source/enumerator/MySqlSourceEnumerator.java | 61 +-
.../mysql/source/events/BinlogSplitMetaEvent.java | 15 +-
.../source/events/BinlogSplitMetaRequestEvent.java | 6 +-
.../events/FinishedSnapshotSplitsAckEvent.java | 9 +-
.../events/FinishedSnapshotSplitsReportEvent.java | 11 +-
.../events/FinishedSnapshotSplitsRequestEvent.java | 6 +-
.../events/LatestFinishedSplitsSizeEvent.java | 6 +-
.../LatestFinishedSplitsSizeRequestEvent.java | 6 +-
.../source/events/SuspendBinlogReaderAckEvent.java | 6 +-
.../source/events/SuspendBinlogReaderEvent.java | 6 +-
.../cdc/mysql/source/events/WakeupReaderEvent.java | 21 +-
.../source/metrics/MySqlSourceReaderMetrics.java | 8 +-
.../cdc/mysql/source/offset/BinlogOffset.java | 9 +-
.../source/offset/BinlogOffsetSerializer.java | 7 +-
.../mysql/source/reader/MySqlRecordEmitter.java | 39 +-
.../cdc/mysql/source/reader/MySqlSourceReader.java | 83 +--
.../source/reader/MySqlSourceReaderContext.java | 2 +-
.../cdc/mysql/source/reader/MySqlSplitReader.java | 45 +-
.../source/split/FinishedSnapshotSplitInfo.java | 97 +--
.../cdc/mysql/source/split/MySqlBinlogSplit.java | 118 ++--
.../mysql/source/split/MySqlBinlogSplitState.java | 17 +-
.../sort}/cdc/mysql/source/split/MySqlRecords.java | 9 +-
.../cdc/mysql/source/split/MySqlSnapshotSplit.java | 29 +-
.../source/split/MySqlSnapshotSplitState.java | 12 +-
.../sort}/cdc/mysql/source/split/MySqlSplit.java | 5 +-
.../mysql/source/split/MySqlSplitSerializer.java | 185 ++---
.../cdc/mysql/source/split/MySqlSplitState.java | 2 +-
.../sort}/cdc/mysql/source/utils/ChunkUtils.java | 21 +-
.../sort}/cdc/mysql/source/utils/ObjectUtils.java | 2 +-
.../sort}/cdc/mysql/source/utils/RecordUtils.java | 80 ++-
.../cdc/mysql/source/utils/SerializerUtils.java | 21 +-
.../cdc/mysql/source/utils/StatementUtils.java | 19 +-
.../mysql/source/utils/TableDiscoveryUtils.java | 23 +-
.../inlong/sort}/cdc/mysql/table/JdbcUrlUtils.java | 2 +-
.../MySqlDeserializationConverterFactory.java | 21 +-
.../cdc/mysql/table/MySqlReadableMetadata.java | 156 +++--
.../mysql/table/MySqlTableInlongSourceFactory.java | 79 +--
.../sort}/cdc/mysql/table/MySqlTableSource.java | 24 +-
.../cdc/mysql/table/OldFieldMetadataConverter.java | 39 +-
.../inlong/sort}/cdc/mysql/table/StartupMode.java | 2 +-
.../sort}/cdc/mysql/table/StartupOptions.java | 6 +-
.../org.apache.flink.table.factories.Factory | 4 +-
inlong-sort/sort-connectors/pom.xml | 165 ++---
inlong-sort/sort-connectors/pulsar/pom.xml | 49 ++
.../table/DynamicPulsarSerializationSchema.java | 502 +++++++-------
.../pulsar/table/PulsarDynamicTableFactory.java | 740 ++++++++++----------
.../sort}/pulsar/table/PulsarDynamicTableSink.java | 767 +++++++++++----------
.../table/UpsertPulsarDynamicTableFactory.java | 726 ++++++++++---------
.../org.apache.flink.table.factories.Factory | 35 +-
.../clickhouse/ClickHouseConnectionProvider.java | 176 -----
.../flink/clickhouse/ClickHouseRowConverter.java | 163 -----
.../flink/clickhouse/ClickHouseSinkFunction.java | 89 ---
.../clickhouse/ClickHouseStatementFactory.java | 76 --
.../executor/ClickHouseAppendExecutor.java | 122 ----
.../clickhouse/executor/ClickHouseExecutor.java | 41 --
.../executor/ClickHouseExecutorFactory.java | 46 --
.../executor/ClickHouseUpsertExecutor.java | 141 ----
.../output/AbstractClickHouseOutputFormat.java | 40 --
.../output/ClickHouseBatchOutputFormat.java | 126 ----
.../output/ClickHouseOutputFormatFactory.java | 89 ---
.../output/ClickHouseShardOutputFormat.java | 178 -----
.../clickhouse/partitioner/BalancePartitioner.java | 38 -
.../partitioner/ClickHousePartitioner.java | 31 -
.../clickhouse/partitioner/HashPartitioner.java | 43 --
.../clickhouse/partitioner/RandomPartitioner.java | 36 -
.../inlong/sort/flink/doris/DorisSinkFunction.java | 77 ---
.../inlong/sort/flink/doris/DorisSinkOptions.java | 93 ---
.../sort/flink/doris/DorisSinkOptionsBuilder.java | 117 ----
.../sort/flink/doris/load/DorisBeInfoResponse.java | 60 --
.../sort/flink/doris/load/DorisConnectParam.java | 110 ---
.../sort/flink/doris/load/DorisHttpUtils.java | 99 ---
.../inlong/sort/flink/doris/load/DorisRespond.java | 64 --
.../sort/flink/doris/load/DorisRespondMsg.java | 219 ------
.../sort/flink/doris/load/DorisRowConverter.java | 38 -
.../sort/flink/doris/load/DorisStreamLoad.java | 152 ----
.../sort/flink/doris/output/DorisOutputFormat.java | 174 -----
.../inlong/sort/flink/filesystem/Bucket.java | 433 ------------
.../sort/flink/filesystem/BucketAssigner.java | 78 ---
.../sort/flink/filesystem/BucketFactory.java | 49 --
.../inlong/sort/flink/filesystem/BucketState.java | 118 ----
.../flink/filesystem/BucketStateSerializer.java | 168 -----
.../inlong/sort/flink/filesystem/Buckets.java | 369 ----------
.../sort/flink/filesystem/BulkPartWriter.java | 103 ---
.../flink/filesystem/DefaultBucketFactoryImpl.java | 74 --
.../flink/filesystem/DefaultRollingPolicy.java | 140 ----
.../inlong/sort/flink/filesystem/PartFileInfo.java | 48 --
.../sort/flink/filesystem/PartFileWriter.java | 130 ----
.../sort/flink/filesystem/RollingPolicy.java | 54 --
.../sort/flink/filesystem/StreamingFileSink.java | 225 ------
.../inlong/sort/flink/hive/HiveCommitter.java | 121 ----
.../flink/hive/HivePartitionBucketAssigner.java | 52 --
.../inlong/sort/flink/hive/HiveSinkHelper.java | 90 ---
.../apache/inlong/sort/flink/hive/HiveWriter.java | 271 --------
.../sort/flink/hive/formats/TextRowWriter.java | 323 ---------
.../sort/flink/hive/formats/orc/OrcBulkWriter.java | 74 --
.../hive/formats/orc/OrcBulkWriterFactory.java | 113 ---
.../flink/hive/formats/orc/OrcBulkWriterUtil.java | 97 ---
.../flink/hive/formats/orc/PhysicalWriterImpl.java | 400 -----------
.../sort/flink/hive/formats/orc/RowVectorizer.java | 523 --------------
.../hive/formats/parquet/ParquetRowWriter.java | 512 --------------
.../formats/parquet/ParquetRowWriterBuilder.java | 115 ---
.../formats/parquet/ParquetSchemaConverter.java | 627 -----------------
.../sort/flink/hive/partition/HivePartition.java | 97 ---
.../partition/JdbcHivePartitionCommitPolicy.java | 138 ----
.../hive/partition/JdbcHivePartitionTool.java | 92 ---
.../flink/hive/partition/PartitionCommitInfo.java | 63 --
.../hive/partition/PartitionCommitPolicy.java | 62 --
.../flink/hive/partition/PartitionComputer.java | 49 --
.../flink/hive/partition/PartitionPathUtils.java | 74 --
.../flink/hive/partition/RowPartitionComputer.java | 137 ----
.../inlong/sort/flink/kafka/KafkaSinkBuilder.java | 65 --
.../flink/pulsar/PulsarDeserializationSchema.java | 55 --
.../inlong/sort/flink/pulsar/PulsarOptions.java | 122 ----
.../sort/flink/pulsar/PulsarSourceFunction.java | 587 ----------------
.../inlong/sort/flink/pulsar/PulsarUtils.java | 243 -------
.../flink/pulsar/TDMQPulsarSourceFunction.java | 448 ------------
.../clickhouse/ClickHouseRowConverterTest.java | 66 --
.../inlong/sort/flink/doris/TestDorisSink.java | 66 --
.../inlong/sort/flink/hive/HiveSinkHelperTest.java | 96 ---
.../sort/flink/hive/formats/TextRowWriterTest.java | 153 ----
.../flink/hive/formats/orc/OrcBulkWriterTest.java | 164 -----
.../flink/hive/formats/orc/RowVectorizerTest.java | 219 ------
.../formats/parquet/ParquetBulkWriterTest.java | 174 -----
.../JdbcHivePartitionCommitPolicyTest.java | 65 --
.../sort-connectors/src/test/resources/testGzip.gz | Bin 38 -> 0 bytes
inlong-sort/sort-core/pom.xml | 144 ++--
.../main/java/org/apache/inlong/sort/Entrance.java | 60 ++
.../org/apache/inlong/sort/flink/Entrance.java | 159 -----
.../sort/flink/InLongMsgMixedSerializedRecord.java | 50 --
.../java/org/apache/inlong/sort/flink/Record.java | 92 ---
.../apache/inlong/sort/flink/SerializedRecord.java | 72 --
.../org/apache/inlong/sort/flink/SourceEvent.java | 81 ---
.../flink/deserialization/CallbackCollector.java | 50 --
.../deserialization/DeserializationSchema.java | 242 -------
.../sort/flink/deserialization/Deserializer.java | 27 -
.../deserialization/InLongMsgDeserializer.java | 40 --
.../InLongMsgMixedDeserializer.java | 114 ---
.../deserialization/MultiTenancyDeserializer.java | 109 ---
.../MultiTenancyInLongMsgMixedDeserializer.java | 155 -----
.../inlong/sort/flink/metrics/MetricData.java | 179 -----
.../sort/flink/metrics/MetricsAggregator.java | 121 ----
.../MetricsAssignerWithPeriodicWatermarks.java | 43 --
.../inlong/sort/flink/metrics/MetricsLogSink.java | 74 --
.../MultiTenantFunctionInitializationContext.java | 136 ----
.../clickhouse/ClickHouseMultiSinkFunction.java | 171 -----
.../multitenant/doris/DorisMultiSinkFunction.java | 166 -----
.../multitenant/hive/HiveMultiTenantCommitter.java | 167 -----
.../multitenant/hive/HiveMultiTenantWriter.java | 250 -------
.../pulsar/MultiTenancyPulsarConsumer.java | 177 -----
.../pulsar/MultiTopicPulsarSourceFunction.java | 207 ------
.../SerializedRecordDeserializationSchema.java | 47 --
.../tubemq/MultiTenancyTubeConsumer.java | 420 -----------
.../tubemq/MultiTopicTubeSourceFunction.java | 346 ----------
.../tubemq/TubeSubscriptionDescription.java | 203 ------
.../transformation/FieldMappingTransformer.java | 150 ----
.../flink/transformation/RecordTransformer.java | 147 ----
.../sort}/function/RegexpReplaceFirstFunction.java | 2 +-
.../inlong/sort/meta/MetaDataFlowInfoListener.java | 73 --
.../org/apache/inlong/sort/meta/MetaManager.java | 252 -------
.../org/apache/inlong/sort/meta/MetaWatcher.java | 44 --
.../sort/meta/zookeeper/ZookeeperMetaWatcher.java | 167 -----
.../sort/meta/zookeeper/ZookeeperWatcherUtils.java | 377 ----------
.../org/apache/inlong/sort}/parser/Parser.java | 4 +-
.../inlong/sort}/parser/impl/FlinkSqlParser.java | 65 +-
.../sort}/parser/result/FlinkSqlParseResult.java | 6 +-
.../inlong/sort}/parser/result/ParseResult.java | 2 +-
.../org/apache/inlong/sort/util/CommonUtils.java | 128 ----
.../apache/inlong/sort/util/CountBasedSampler.java | 51 --
.../java/org/apache/inlong/sort/util/Sampler.java | 33 -
.../org/apache/inlong/sort/util/TestingUtils.java | 86 ---
.../sort-core/src/main/resources/log4j.properties | 28 +
.../InLongMsgMixedDeserializerTest.java | 156 -----
...MultiTenancyInLongMsgMixedDeserializerTest.java | 103 ---
.../inlong/sort/flink/metrics/MetricDataTest.java | 32 -
.../sort/flink/metrics/MetricsLogSinkTest.java | 58 --
.../flink/multitenant/hive/HiveSinkITCase.java | 463 -------------
.../pulsar/MultiTopicPulsarSourceFunctionTest.java | 225 ------
.../pulsar/PulsarTestMetaManagerUtil.java | 89 ---
.../multitenant/pulsar/TestSourceContext.java | 71 --
.../tubemq/MultiTenancyTubeConsumerTest.java | 296 --------
.../tubemq/MultiTopicTubeSourceFunctionTest.java | 186 -----
.../tubemq/TubeSubscriptionDescriptionTest.java | 103 ---
.../FieldMappingTransformerTest.java | 96 ---
.../transformation/RecordTransformerTest.java | 191 -----
.../sort}/function/CascadeFunctionWrapperTest.java | 4 +-
.../function/RegexpReplaceFirstFunctionTest.java | 4 +-
.../sort}/function/RegexpReplaceFunctionTest.java | 4 +-
.../sort}/function/SplitIndexFunctionTest.java | 4 +-
.../apache/inlong/sort/meta/MetaManagerTest.java | 181 -----
.../apache/inlong/sort}/parser/AllMigrateTest.java | 9 +-
.../sort}/parser/ClickHouseSqlParserTest.java | 12 +-
.../sort}/parser/DistinctNodeSqlParseTest.java | 14 +-
.../inlong/sort}/parser/FilterParseTest.java | 10 +-
.../inlong/sort}/parser/FlinkSqlParserTest.java | 8 +-
.../sort}/parser/FullOuterJoinSqlParseTest.java | 14 +-
.../sort}/parser/HbaseLoadFlinkSqlParseTest.java | 14 +-
.../sort}/parser/IcebergNodeSqlParserTest.java | 10 +-
.../parser/InnerJoinRelationShipSqlParseTest.java | 16 +-
.../sort}/parser/LeftOuterJoinSqlParseTest.java | 14 +-
.../inlong/sort}/parser/MetaFieldSyncTest.java | 8 +-
.../parser/MongoExtractFlinkSqlParseTest.java | 55 +-
.../parser/PostgresExtractFlinkSqlParseTest.java | 14 +-
.../parser/PostgresLoadNodeFlinkSqlParseTest.java | 12 +-
.../inlong/sort}/parser/PulsarSqlParserTest.java | 8 +-
.../sort}/parser/RightOuterJoinSqlParseTest.java | 14 +-
.../inlong/sort/util/TestMetaManagerUtil.java | 65 --
.../inlong/sort/util/ZooKeeperTestEnvironment.java | 158 -----
inlong-sort/sort-single-tenant/pom.xml | 345 ---------
.../inlong/sort/singletenant/flink/Entrance.java | 297 --------
.../sort/singletenant/flink/SerializedRecord.java | 61 --
.../clickhouse/ClickhouseRowSinkFunction.java | 58 --
.../flink/deserialization/CallbackCollector.java | 50 --
.../CanalDeserializationSchemaBuilder.java | 150 ----
...stomDateFormatDeserializationSchemaWrapper.java | 114 ---
.../DebeziumDeserializationSchemaBuilder.java | 135 ----
.../deserialization/DeserializationFunction.java | 142 ----
.../DeserializationSchemaFactory.java | 104 ---
.../deserialization/FieldMappingTransformer.java | 140 ----
.../flink/deserialization/ListCollector.java | 43 --
.../RowDataToRowDeserializationSchemaWrapper.java | 79 ---
.../flink/pulsar/PulsarSourceBuilder.java | 117 ----
.../CanalSerializationSchemaBuilder.java | 88 ---
...CustomDateFormatSerializationSchemaWrapper.java | 88 ---
.../DebeziumSerializationSchemaBuilder.java | 64 --
.../RowToRowDataSerializationSchemaWrapper.java | 52 --
.../serialization/SerializationSchemaFactory.java | 108 ---
.../flink/transformation/Transformer.java | 123 ----
.../sort/singletenant/flink/utils/CommonUtils.java | 302 --------
.../inlong/sort/flink/kafka/KafkaSinkTestBase.java | 279 --------
.../sort/flink/kafka/RowToAvroKafkaSinkTest.java | 129 ----
.../sort/flink/kafka/RowToCanalKafkaSinkTest.java | 88 ---
.../kafka/RowToDebeziumJsonKafkaSinkTest.java | 90 ---
.../sort/flink/kafka/RowToJsonKafkaSinkTest.java | 89 ---
.../sort/flink/kafka/RowToStringKafkaSinkTest.java | 68 --
.../singletenant/flink/DebeziumToCanalITCase.java | 255 -------
.../singletenant/flink/WholeDBMigrationITCase.java | 293 --------
.../hive/HiveSinkWithoutPartitionTestCase.java | 238 -------
.../deserialization/AvroDeserializationTest.java | 102 ---
.../deserialization/CanalDeserializationTest.java | 224 ------
...DateFormatDeserializationSchemaWrapperTest.java | 65 --
.../DebeziumDeserializationTest.java | 150 ----
.../DeserializationFunctionTest.java | 152 ----
.../FieldMappingTransformerTest.java | 72 --
.../deserialization/JsonDeserializationTest.java | 112 ---
.../serialization/CanalSerializationTest.java | 82 ---
...omDateFormatSerializationSchemaWrapperTest.java | 66 --
.../flink/transformation/TransformerTest.java | 121 ----
.../singletenant/flink/utils/CommonUtilsTest.java | 357 ----------
.../sort/singletenant/flink/utils/NetUtils.java | 77 ---
.../src/test/resources/log4j-test.properties | 29 -
licenses/LICENSE | 236 +++----
licenses/inlong-agent/LICENSE | 236 +++----
licenses/inlong-audit/LICENSE | 236 +++----
licenses/inlong-dashboard/LICENSE | 236 +++----
licenses/inlong-dataproxy/LICENSE | 236 +++----
licenses/inlong-manager/LICENSE | 236 +++----
licenses/inlong-sort-standalone/LICENSE | 236 +++----
licenses/inlong-sort/LICENSE | 241 ++++---
licenses/inlong-sort/NOTICE | 24 -
licenses/inlong-tubemq-manager/LICENSE | 236 +++----
licenses/inlong-tubemq-server/LICENSE | 236 +++----
353 files changed, 6137 insertions(+), 30410 deletions(-)
diff --git a/LICENSE b/LICENSE
index 1ec17c6eb..0c4b73b75 100644
--- a/LICENSE
+++ b/LICENSE
@@ -353,130 +353,130 @@
Source : checkstyle 8.44 (Modified from src/main/resources/google_checks.xml)
License : https://github.com/checkstyle/checkstyle/blob/master/LICENSE.apache20
-1.3.5 inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplit.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReaderContext.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverterFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTypeUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/StatementUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlSchema.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlDeserializationConverterFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverter.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSource.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPoolId.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/JsonDebeziumDeserializationSchema.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPools.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlRecords.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/SnapshotSplitReader.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTableDefinition.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/Validator.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkOffsetBackingStore.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/StatefulTaskContext.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/DebeziumUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplit.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfig.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlReadableMetadata.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableSource.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReader.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceOptions.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/SchemaRecord.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplit.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplitState.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ChunkUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/BinlogSplitReader.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/TemporalConversions.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffsetSerializer.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkRange.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/TableDiscoveryUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/RecordUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumSourceFunction.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSplitReader.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/DatabaseHistoryUtil.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlErrorHandler.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/AppendMetadataCollector.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/SerializerUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlSource.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/AssignerStatus.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumDeserializationSchema.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/StringDebeziumDeserializationSchema.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseHistory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkSplitter.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlRecordEmitter.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffset.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/StartupMode.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableInlongSourceFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/OldFieldMetadataConverter.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ObjectUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsState.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfigFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitState.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitSerializer.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/StartupOptions.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/JdbcUrlUtils.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/ServerIdRange.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplitState.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlValidator.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/WakeupReaderEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/Handover.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/DebeziumReader.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSplitAssigner.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/SeekBinlogToTimestampFilter.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/PooledDataSourceFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/MetadataConverter.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionPools.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSourceBuilder.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DebeziumOptions.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlFieldDefinition.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffset.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeFetcher.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeConsumer.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffsetSerializer.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/history/FlinkJsonTableChangeSerializer.java
+1.3.5 inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplit.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReaderContext.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverterFactory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTypeUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/StatementUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlSchema.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlDeserializationConverterFactory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverter.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionFactory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSource.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPoolId.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/JsonDebeziumDeserializationSchema.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPools.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlRecords.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/SnapshotSplitReader.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTableDefinition.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/Validator.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkOffsetBackingStore.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/StatefulTaskContext.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/DebeziumUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplit.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfig.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlReadableMetadata.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableSource.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReader.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceOptions.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/SchemaRecord.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplit.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplitState.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ChunkUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/BinlogSplitReader.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/TemporalConversions.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffsetSerializer.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkRange.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/TableDiscoveryUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/RecordUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumSourceFunction.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSplitReader.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/DatabaseHistoryUtil.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlErrorHandler.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/AppendMetadataCollector.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/SerializerUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlSource.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/AssignerStatus.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumDeserializationSchema.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/StringDebeziumDeserializationSchema.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseHistory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkSplitter.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlRecordEmitter.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffset.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/StartupMode.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableInlongSourceFactory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/OldFieldMetadataConverter.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ObjectUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsState.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfigFactory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitState.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitSerializer.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/StartupOptions.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/JdbcUrlUtils.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/ServerIdRange.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplitState.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlValidator.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/WakeupReaderEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/Handover.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/DebeziumReader.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSplitAssigner.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/SeekBinlogToTimestampFilter.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/PooledDataSourceFactory.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/MetadataConverter.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionPools.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSourceBuilder.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DebeziumOptions.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlFieldDefinition.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffset.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeFetcher.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeConsumer.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffsetSerializer.java
+ inlong-sort/sort-connectors/sort-mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/history/FlinkJsonTableChangeSerializer.java
Source : flink-cdc-connectors 2.0.1 (Please note that the software have been modified.)
License : https://github.com/ververica/flink-cdc-connectors/blob/master/LICENSE
-1.3.6 inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveValidator.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableSink.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableMetaStoreFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowPartitionComputer.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowDataPartitionComputer.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveOptions.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HadoopFileSystemFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/table/catalog/factories/HiveTableInlongFactory.java
+1.3.6 inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/HiveValidator.java
+ inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/HiveTableSink.java
+ inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/HiveTableMetaStoreFactory.java
+ inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/HiveRowPartitionComputer.java
+ inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/HiveRowDataPartitionComputer.java
+ inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/HiveOptions.java
+ inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/HadoopFileSystemFactory.java
+ inlong-sort/sort-connectors/sort-hive/src/main/java/org/apache/inlong/sort/hive/table/HiveTableInlongFactory.java
Source : flink-connector-hive 1.13.5 (Please note that the software have been modified.)
License : https://github.com/apache/flink/blob/master/LICENSE
-1.3.7 inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/flink/pulsar/table/DynamicPulsarSerializationSchema.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/flink/pulsar/table/PulsarDynamicTableFactory.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/flink/pulsar/table/PulsarDynamicTableSink.java
- inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/flink/pulsar/table/UpsertPulsarDynamicTableFactory.java
+1.3.7 inlong-sort/sort-connectors/sort-pulsar/src/main/java/org/apache/inlong/sort/pulsar/table/DynamicPulsarSerializationSchema.java
+ inlong-sort/sort-connectors/sort-pulsar/src/main/java/org/apache/inlong/sort/pulsar/table/PulsarDynamicTableFactory.java
+ inlong-sort/sort-connectors/sort-pulsar/src/main/java/org/apache/inlong/sort/pulsar/table/PulsarDynamicTableSink.java
+ inlong-sort/sort-connectors/sort-pulsar/src/main/java/org/apache/inlong/sort/pulsar/table/UpsertPulsarDynamicTableFactory.java
Source : pulsar-flink-connector_2.11 1.13.6.1-rc9 (Please note that the software have been modified.)
License : https://github.com/streamnative/pulsar-flink/blob/master/LICENSE
diff --git a/inlong-sort/pom.xml b/inlong-sort/pom.xml
index 3d1f326e6..12182abde 100644
--- a/inlong-sort/pom.xml
+++ b/inlong-sort/pom.xml
@@ -36,11 +36,10 @@
<modules>
<module>sort-api</module>
<module>sort-common</module>
- <module>sort-core</module>
- <module>sort-dist</module>
<module>sort-formats</module>
- <module>sort-single-tenant</module>
<module>sort-connectors</module>
+ <module>sort-core</module>
+ <module>sort-dist</module>
</modules>
<dependencies>
diff --git a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java
index 8bad0b2e0..a2c3bc934 100644
--- a/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java
+++ b/inlong-sort/sort-common/src/main/java/org/apache/inlong/sort/protocol/node/load/ClickHouseLoadNode.java
@@ -90,7 +90,7 @@ public class ClickHouseLoadNode extends LoadNode implements Serializable {
public Map<String, String> tableOptions() {
Map<String, String> options = super.tableOptions();
options.put("connector", "jdbc-inlong");
- options.put("dialect-impl", "org.apache.inlong.sort.flink.clickhouse.table.ClickHouseDialect");
+ options.put("dialect-impl", "org.apache.inlong.sort.jdbc.dialect.ClickHouseDialect");
options.put("url", url);
options.put("table-name", tableName);
options.put("username", userName);
diff --git a/inlong-sort/sort-connectors/hive/pom.xml b/inlong-sort/sort-connectors/hive/pom.xml
new file mode 100644
index 000000000..6683ba09a
--- /dev/null
+++ b/inlong-sort/sort-connectors/hive/pom.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sort-connectors</artifactId>
+ <groupId>org.apache.inlong</groupId>
+ <version>1.2.0-incubating-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>sort-connector-hive</artifactId>
+ <name>Apache InLong - Sort-connector-hive</name>
+ <packaging>jar</packaging>
+
+ <dependencies>
+
+ <dependency>
+ <groupId>org.apache.flink</groupId>
+ <artifactId>flink-connector-hive_${flink.scala.binary.version}</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-exec</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.calcite</groupId>
+ <artifactId>*</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.calcite</groupId>
+ <artifactId>calcite-druid</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ <version>2.10.1</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-jdbc</artifactId>
+ <exclusions>
+ <exclusion>
+ <artifactId>parquet-hadoop-bundle</artifactId>
+ <groupId>org.apache.parquet</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ </dependencies>
+
+</project>
\ No newline at end of file
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HadoopFileSystemFactory.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HadoopFileSystemFactory.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HadoopFileSystemFactory.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HadoopFileSystemFactory.java
index aa271b4aa..612d5c0e4 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HadoopFileSystemFactory.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HadoopFileSystemFactory.java
@@ -15,10 +15,8 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive;
+package org.apache.inlong.sort.hive;
-import java.io.IOException;
-import java.net.URI;
import org.apache.flink.connectors.hive.JobConfWrapper;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.runtime.fs.hdfs.HadoopFileSystem;
@@ -26,6 +24,9 @@ import org.apache.flink.table.filesystem.FileSystemFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
+import java.io.IOException;
+import java.net.URI;
+
/**
* Hive {@link FileSystemFactory}, hive need use job conf to create file system.
*/
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveOptions.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveOptions.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveOptions.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveOptions.java
index 167b56895..f4ba6417f 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveOptions.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveOptions.java
@@ -15,13 +15,13 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive;
-
-import static org.apache.flink.configuration.ConfigOptions.key;
+package org.apache.inlong.sort.hive;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
+import static org.apache.flink.configuration.ConfigOptions.key;
+
/**
* This class holds configuration constants used by hive connector.
*/
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowDataPartitionComputer.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveRowDataPartitionComputer.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowDataPartitionComputer.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveRowDataPartitionComputer.java
index 0f1beae54..f38a415c5 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowDataPartitionComputer.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveRowDataPartitionComputer.java
@@ -15,10 +15,8 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive;
+package org.apache.inlong.sort.hive;
-import java.util.Arrays;
-import java.util.LinkedHashMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.table.catalog.hive.client.HiveShim;
import org.apache.flink.table.data.RowData;
@@ -30,6 +28,9 @@ import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.TypeConversions;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+
/**
* A {@link RowDataPartitionComputer} that converts Flink objects to Hive objects before computing
* the partition value strings.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowPartitionComputer.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveRowPartitionComputer.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowPartitionComputer.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveRowPartitionComputer.java
index 4c8b133cf..9005d0274 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveRowPartitionComputer.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveRowPartitionComputer.java
@@ -15,9 +15,8 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive;
+package org.apache.inlong.sort.hive;
-import java.util.LinkedHashMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.table.catalog.hive.client.HiveShim;
import org.apache.flink.table.filesystem.RowPartitionComputer;
@@ -27,6 +26,8 @@ import org.apache.flink.table.types.DataType;
import org.apache.flink.types.Row;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import java.util.LinkedHashMap;
+
/**
* A RowPartitionComputer that converts Flink objects to Hive objects before computing the partition
* value strings.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableMetaStoreFactory.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveTableMetaStoreFactory.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableMetaStoreFactory.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveTableMetaStoreFactory.java
index 72724ce11..37088a680 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableMetaStoreFactory.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveTableMetaStoreFactory.java
@@ -15,12 +15,8 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive;
+package org.apache.inlong.sort.hive;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.Optional;
import org.apache.flink.connectors.hive.JobConfWrapper;
import org.apache.flink.connectors.hive.util.HiveConfUtils;
import org.apache.flink.core.fs.Path;
@@ -34,6 +30,11 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.mapred.JobConf;
import org.apache.thrift.TException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Optional;
+
/**
* Hive {@link TableMetaStoreFactory}, use {@link HiveMetastoreClientWrapper} to communicate with
* hive meta store.
@@ -83,9 +84,9 @@ public class HiveTableMetaStoreFactory implements TableMetaStoreFactory {
return Optional.of(
new Path(
client.getPartition(
- database,
- tableName,
- new ArrayList<>(partSpec.values()))
+ database,
+ tableName,
+ new ArrayList<>(partSpec.values()))
.getSd()
.getLocation()));
} catch (NoSuchObjectException ignore) {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableSink.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveTableSink.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableSink.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveTableSink.java
index 14f2d4a71..8da4bffdd 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveTableSink.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveTableSink.java
@@ -15,29 +15,12 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive;
+package org.apache.inlong.sort.hive;
-import static org.apache.flink.table.catalog.hive.util.HiveTableUtil.checkAcidTable;
-import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_ROLLING_POLICY_CHECK_INTERVAL;
-import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_ROLLING_POLICY_FILE_SIZE;
-import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_ROLLING_POLICY_ROLLOVER_INTERVAL;
-import static org.apache.flink.table.filesystem.stream.compact.CompactOperator.convertToUncompacted;
-import static org.apache.inlong.sort.singletenant.flink.connectors.hive.HiveOptions.HIVE_IGNORE_ALL_CHANGELOG;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Properties;
-import java.util.UUID;
-import javax.annotation.Nullable;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.BulkWriter;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.connectors.hive.FlinkHiveException;
-import org.apache.flink.connectors.hive.HiveOptions;
import org.apache.flink.connectors.hive.read.HiveCompactReaderFactory;
import org.apache.flink.connectors.hive.util.HiveConfUtils;
import org.apache.flink.connectors.hive.util.JobConfUtils;
@@ -99,6 +82,23 @@ import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.UUID;
+
+import static org.apache.flink.table.catalog.hive.util.HiveTableUtil.checkAcidTable;
+import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_ROLLING_POLICY_CHECK_INTERVAL;
+import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_ROLLING_POLICY_FILE_SIZE;
+import static org.apache.flink.table.filesystem.FileSystemOptions.SINK_ROLLING_POLICY_ROLLOVER_INTERVAL;
+import static org.apache.flink.table.filesystem.stream.compact.CompactOperator.convertToUncompacted;
+import static org.apache.inlong.sort.hive.HiveOptions.HIVE_IGNORE_ALL_CHANGELOG;
+
/**
* Table sink to write to Hive tables.
*/
@@ -155,7 +155,7 @@ public class HiveTableSink implements DynamicTableSink, SupportsPartitioning, Su
dbName = identifier.getDatabaseName();
}
try (HiveMetastoreClientWrapper client =
- HiveMetastoreClientFactory.create(HiveConfUtils.create(jobConf), hiveVersion)) {
+ HiveMetastoreClientFactory.create(HiveConfUtils.create(jobConf), hiveVersion)) {
Table table = client.getTable(dbName, identifier.getObjectName());
StorageDescriptor sd = table.getSd();
@@ -297,9 +297,9 @@ public class HiveTableSink implements DynamicTableSink, SupportsPartitioning, Su
if (bulkFactory.isPresent()) {
builder =
StreamingFileSink.forBulkFormat(
- path,
- new FileSystemTableSink.ProjectionBulkFactory(
- bulkFactory.get(), partComputer))
+ path,
+ new FileSystemTableSink.ProjectionBulkFactory(
+ bulkFactory.get(), partComputer))
.withBucketAssigner(assigner)
.withRollingPolicy(rollingPolicy)
.withOutputFileConfig(outputFileConfig);
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveValidator.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveValidator.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveValidator.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveValidator.java
index f9afa11e3..fb9f5f781 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/HiveValidator.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/HiveValidator.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive;
+package org.apache.inlong.sort.hive;
import org.apache.flink.table.descriptors.ConnectorDescriptorValidator;
import org.apache.flink.table.descriptors.DescriptorProperties;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/table/catalog/factories/HiveTableInlongFactory.java b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/table/HiveTableInlongFactory.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/table/catalog/factories/HiveTableInlongFactory.java
rename to inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/table/HiveTableInlongFactory.java
index 9b9a3b9dd..61d1dd826 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/connectors/hive/table/catalog/factories/HiveTableInlongFactory.java
+++ b/inlong-sort/sort-connectors/hive/src/main/java/org/apache/inlong/sort/hive/table/HiveTableInlongFactory.java
@@ -15,22 +15,9 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.connectors.hive.table.catalog.factories;
-
-import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.DEFAULT_DATABASE;
-import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.HADOOP_CONF_DIR;
-import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.HIVE_CONF_DIR;
-import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.HIVE_VERSION;
-import static org.apache.flink.table.factories.FactoryUtil.PROPERTY_VERSION;
-import static org.apache.flink.table.filesystem.FileSystemOptions.STREAMING_SOURCE_ENABLE;
-import static org.apache.flink.table.filesystem.FileSystemOptions.STREAMING_SOURCE_PARTITION_INCLUDE;
-import static org.apache.inlong.sort.singletenant.flink.connectors.hive.HiveOptions.HIVE_DATABASE;
+package org.apache.inlong.sort.hive.table;
import com.google.common.base.Preconditions;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connectors.hive.HiveLookupTableSource;
@@ -47,7 +34,21 @@ import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.table.filesystem.FileSystemOptions;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.mapred.JobConf;
-import org.apache.inlong.sort.singletenant.flink.connectors.hive.HiveTableSink;
+import org.apache.inlong.sort.hive.HiveTableSink;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.DEFAULT_DATABASE;
+import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.HADOOP_CONF_DIR;
+import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.HIVE_CONF_DIR;
+import static org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions.HIVE_VERSION;
+import static org.apache.flink.table.factories.FactoryUtil.PROPERTY_VERSION;
+import static org.apache.flink.table.filesystem.FileSystemOptions.STREAMING_SOURCE_ENABLE;
+import static org.apache.flink.table.filesystem.FileSystemOptions.STREAMING_SOURCE_PARTITION_INCLUDE;
+import static org.apache.inlong.sort.hive.HiveOptions.HIVE_DATABASE;
/**
* DynamicTableSourceFactory for hive table source
diff --git a/inlong-sort/sort-single-tenant/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/inlong-sort/sort-connectors/hive/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
similarity index 86%
copy from inlong-sort/sort-single-tenant/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
copy to inlong-sort/sort-connectors/hive/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
index 9c7b7e251..3f4336644 100644
--- a/inlong-sort/sort-single-tenant/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
+++ b/inlong-sort/sort-connectors/hive/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
@@ -30,6 +30,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-org.apache.inlong.sort.singletenant.flink.cdc.mysql.table.MySqlTableInlongSourceFactory
-org.apache.inlong.sort.singletenant.flink.connectors.hive.table.catalog.factories.HiveTableInlongFactory
-org.apache.inlong.sort.flink.kafka.KafkaDynamicTableFactory
\ No newline at end of file
+org.apache.inlong.sort.hive.table.HiveTableInlongFactory
\ No newline at end of file
diff --git a/inlong-sort/sort-connectors/jdbc/pom.xml b/inlong-sort/sort-connectors/jdbc/pom.xml
new file mode 100644
index 000000000..799b4b679
--- /dev/null
+++ b/inlong-sort/sort-connectors/jdbc/pom.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sort-connectors</artifactId>
+ <groupId>org.apache.inlong</groupId>
+ <version>1.2.0-incubating-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>sort-connector-jdbc</artifactId>
+ <name>Apache InLong - Sort-connector-jdbc</name>
+ <packaging>jar</packaging>
+
+ <dependencies>
+ <!--for clickhouse-->
+ <dependency>
+ <groupId>ru.yandex.clickhouse</groupId>
+ <artifactId>clickhouse-jdbc</artifactId>
+ </dependency>
+ <!--for postgresql-->
+ <dependency>
+ <groupId>com.ververica</groupId>
+ <artifactId>flink-connector-postgres-cdc</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.postgresql</groupId>
+ <artifactId>postgresql</artifactId>
+ </dependency>
+ <!--for jdbc-->
+ <dependency>
+ <groupId>org.apache.flink</groupId>
+ <artifactId>flink-connector-jdbc_${flink.scala.binary.version}</artifactId>
+ </dependency>
+ </dependencies>
+
+</project>
\ No newline at end of file
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/clickhouse/table/ClickHouseRowConverter.java b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/clickhouse/ClickHouseRowConverter.java
similarity index 93%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/clickhouse/table/ClickHouseRowConverter.java
rename to inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/clickhouse/ClickHouseRowConverter.java
index 70cad5423..198f22e77 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/clickhouse/table/ClickHouseRowConverter.java
+++ b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/clickhouse/ClickHouseRowConverter.java
@@ -1,40 +1,40 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.flink.clickhouse.table;
-
-import org.apache.flink.connector.jdbc.internal.converter.AbstractJdbcRowConverter;
-import org.apache.flink.table.types.logical.RowType;
-
-/**
- * Runtime converter that responsible to convert between JDBC object and Flink internal object for
- * Derby.
- */
-public class ClickHouseRowConverter extends AbstractJdbcRowConverter {
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public String converterName() {
- return "ClickHouse";
- }
-
- public ClickHouseRowConverter(RowType rowType) {
- super(rowType);
- }
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.jdbc.clickhouse;
+
+import org.apache.flink.connector.jdbc.internal.converter.AbstractJdbcRowConverter;
+import org.apache.flink.table.types.logical.RowType;
+
+/**
+ * Runtime converter that responsible to convert between JDBC object and Flink internal object for
+ * Derby.
+ */
+public class ClickHouseRowConverter extends AbstractJdbcRowConverter {
+
+ private static final long serialVersionUID = 1L;
+
+ public ClickHouseRowConverter(RowType rowType) {
+ super(rowType);
+ }
+
+ @Override
+ public String converterName() {
+ return "ClickHouse";
+ }
}
\ No newline at end of file
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/clickhouse/table/ClickHouseDialect.java b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/dialect/ClickHouseDialect.java
similarity index 93%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/clickhouse/table/ClickHouseDialect.java
rename to inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/dialect/ClickHouseDialect.java
index 950e8e09c..781f0b078 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/clickhouse/table/ClickHouseDialect.java
+++ b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/dialect/ClickHouseDialect.java
@@ -1,122 +1,123 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.flink.clickhouse.table;
-
-import org.apache.flink.connector.jdbc.internal.converter.JdbcRowConverter;
-import org.apache.flink.table.types.logical.LogicalTypeRoot;
-import org.apache.flink.table.types.logical.RowType;
-import org.apache.inlong.sort.flink.jdbc.table.AbstractJdbcDialect;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Optional;
-
-/**
- * JDBC dialect for ClickHouse SQL.
- */
-public class ClickHouseDialect extends AbstractJdbcDialect {
-
- // Define MAX/MIN precision of TIMESTAMP type according to ClickHouse docs:
- // https://clickhouse.com/docs/zh/sql-reference/data-types/datetime64
- private static final int MAX_TIMESTAMP_PRECISION = 8;
- private static final int MIN_TIMESTAMP_PRECISION = 0;
-
- // Define MAX/MIN precision of DECIMAL type according to ClickHouse docs:
- // https://clickhouse.com/docs/zh/sql-reference/data-types/decimal/
- private static final int MAX_DECIMAL_PRECISION = 128;
- private static final int MIN_DECIMAL_PRECISION = 32;
-
- @Override
- public String dialectName() {
- return "ClickHouse";
- }
-
- @Override
- public boolean canHandle(String url) {
- return url.startsWith("jdbc:clickhouse:");
- }
-
- @Override
- public JdbcRowConverter getRowConverter(RowType rowType) {
- return new ClickHouseRowConverter(rowType);
- }
-
- @Override
- public String getLimitClause(long limit) {
- return "LIMIT " + limit;
- }
-
- @Override
- public Optional<String> defaultDriverName() {
- return Optional.of("ru.yandex.clickhouse.ClickHouseDriver");
- }
-
- @Override
- public String quoteIdentifier(String identifier) {
- return "`" + identifier + "`";
- }
-
- @Override
- public int maxDecimalPrecision() {
- return MAX_DECIMAL_PRECISION;
- }
-
- @Override
- public int minDecimalPrecision() {
- return MIN_DECIMAL_PRECISION;
-
- }
-
- @Override
- public int maxTimestampPrecision() {
- return MAX_TIMESTAMP_PRECISION;
- }
-
- @Override
- public int minTimestampPrecision() {
- return MIN_TIMESTAMP_PRECISION;
- }
-
- /**
- * Defines the unsupported types for the dialect.
- *
- * @return a list of logical type roots.
- */
- public List<LogicalTypeRoot> unsupportedTypes() {
- // ClickHouse support data type in
- // https://clickhouse.com/docs/en/sql-reference/data-types/
- return Arrays.asList(
- LogicalTypeRoot.BINARY,
- LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE,
- LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE,
- LogicalTypeRoot.INTERVAL_YEAR_MONTH,
- LogicalTypeRoot.INTERVAL_DAY_TIME,
- LogicalTypeRoot.ARRAY,
- LogicalTypeRoot.MULTISET,
- LogicalTypeRoot.MAP,
- LogicalTypeRoot.ROW,
- LogicalTypeRoot.DISTINCT_TYPE,
- LogicalTypeRoot.STRUCTURED_TYPE,
- LogicalTypeRoot.NULL,
- LogicalTypeRoot.RAW,
- LogicalTypeRoot.SYMBOL,
- LogicalTypeRoot.UNRESOLVED);
- }
-
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.jdbc.dialect;
+
+import org.apache.flink.connector.jdbc.internal.converter.JdbcRowConverter;
+import org.apache.flink.table.types.logical.LogicalTypeRoot;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.inlong.sort.jdbc.clickhouse.ClickHouseRowConverter;
+import org.apache.inlong.sort.jdbc.table.AbstractJdbcDialect;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * JDBC dialect for ClickHouse SQL.
+ */
+public class ClickHouseDialect extends AbstractJdbcDialect {
+
+ // Define MAX/MIN precision of TIMESTAMP type according to ClickHouse docs:
+ // https://clickhouse.com/docs/zh/sql-reference/data-types/datetime64
+ private static final int MAX_TIMESTAMP_PRECISION = 8;
+ private static final int MIN_TIMESTAMP_PRECISION = 0;
+
+ // Define MAX/MIN precision of DECIMAL type according to ClickHouse docs:
+ // https://clickhouse.com/docs/zh/sql-reference/data-types/decimal/
+ private static final int MAX_DECIMAL_PRECISION = 128;
+ private static final int MIN_DECIMAL_PRECISION = 32;
+
+ @Override
+ public String dialectName() {
+ return "ClickHouse";
+ }
+
+ @Override
+ public boolean canHandle(String url) {
+ return url.startsWith("jdbc:clickhouse:");
+ }
+
+ @Override
+ public JdbcRowConverter getRowConverter(RowType rowType) {
+ return new ClickHouseRowConverter(rowType);
+ }
+
+ @Override
+ public String getLimitClause(long limit) {
+ return "LIMIT " + limit;
+ }
+
+ @Override
+ public Optional<String> defaultDriverName() {
+ return Optional.of("ru.yandex.clickhouse.ClickHouseDriver");
+ }
+
+ @Override
+ public String quoteIdentifier(String identifier) {
+ return "`" + identifier + "`";
+ }
+
+ @Override
+ public int maxDecimalPrecision() {
+ return MAX_DECIMAL_PRECISION;
+ }
+
+ @Override
+ public int minDecimalPrecision() {
+ return MIN_DECIMAL_PRECISION;
+
+ }
+
+ @Override
+ public int maxTimestampPrecision() {
+ return MAX_TIMESTAMP_PRECISION;
+ }
+
+ @Override
+ public int minTimestampPrecision() {
+ return MIN_TIMESTAMP_PRECISION;
+ }
+
+ /**
+ * Defines the unsupported types for the dialect.
+ *
+ * @return a list of logical type roots.
+ */
+ public List<LogicalTypeRoot> unsupportedTypes() {
+ // ClickHouse supports the data types listed in
+ // https://clickhouse.com/docs/en/sql-reference/data-types/
+ return Arrays.asList(
+ LogicalTypeRoot.BINARY,
+ LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE,
+ LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE,
+ LogicalTypeRoot.INTERVAL_YEAR_MONTH,
+ LogicalTypeRoot.INTERVAL_DAY_TIME,
+ LogicalTypeRoot.ARRAY,
+ LogicalTypeRoot.MULTISET,
+ LogicalTypeRoot.MAP,
+ LogicalTypeRoot.ROW,
+ LogicalTypeRoot.DISTINCT_TYPE,
+ LogicalTypeRoot.STRUCTURED_TYPE,
+ LogicalTypeRoot.NULL,
+ LogicalTypeRoot.RAW,
+ LogicalTypeRoot.SYMBOL,
+ LogicalTypeRoot.UNRESOLVED);
+ }
+
+}
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/AbstractJdbcDialect.java b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/AbstractJdbcDialect.java
similarity index 96%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/AbstractJdbcDialect.java
rename to inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/AbstractJdbcDialect.java
index 27929ecd6..856acd8ea 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/AbstractJdbcDialect.java
+++ b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/AbstractJdbcDialect.java
@@ -1,103 +1,103 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.flink.jdbc.table;
-
-import org.apache.flink.connector.jdbc.dialect.JdbcDialect;
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.api.ValidationException;
-import org.apache.flink.table.types.DataType;
-import org.apache.flink.table.types.logical.DecimalType;
-import org.apache.flink.table.types.logical.LogicalTypeRoot;
-import org.apache.flink.table.types.logical.TimestampType;
-import org.apache.flink.table.types.logical.VarBinaryType;
-
-import java.util.List;
-
-/**
- * Default JDBC dialects implements for validate.
- */
-public abstract class AbstractJdbcDialect implements JdbcDialect {
-
- @Override
- public void validate(TableSchema schema) throws ValidationException {
- for (int i = 0; i < schema.getFieldCount(); i++) {
- DataType dt = schema.getFieldDataType(i).get();
- String fieldName = schema.getFieldName(i).get();
-
- // TODO: We can't convert VARBINARY(n) data type to
- // PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO in
- // LegacyTypeInfoDataTypeConverter
- // when n is smaller than Integer.MAX_VALUE
- if (unsupportedTypes().contains(dt.getLogicalType().getTypeRoot())
- || (dt.getLogicalType() instanceof VarBinaryType
- && Integer.MAX_VALUE
- != ((VarBinaryType) dt.getLogicalType()).getLength())) {
- throw new ValidationException(
- String.format(
- "The %s dialect doesn't support type: %s.",
- dialectName(), dt.toString()));
- }
-
- // only validate precision of DECIMAL type for blink planner
- if (dt.getLogicalType() instanceof DecimalType) {
- int precision = ((DecimalType) dt.getLogicalType()).getPrecision();
- if (precision > maxDecimalPrecision() || precision < minDecimalPrecision()) {
- throw new ValidationException(
- String.format(
- "The precision of field '%s' is out of the DECIMAL "
- + "precision range [%d, %d] supported by %s dialect.",
- fieldName,
- minDecimalPrecision(),
- maxDecimalPrecision(),
- dialectName()));
- }
- }
-
- // only validate precision of DECIMAL type for blink planner
- if (dt.getLogicalType() instanceof TimestampType) {
- int precision = ((TimestampType) dt.getLogicalType()).getPrecision();
- if (precision > maxTimestampPrecision() || precision < minTimestampPrecision()) {
- throw new ValidationException(
- String.format(
- "The precision of field '%s' is out of the TIMESTAMP "
- + "precision range [%d, %d] supported by %s dialect.",
- fieldName,
- minTimestampPrecision(),
- maxTimestampPrecision(),
- dialectName()));
- }
- }
- }
- }
-
- public abstract int maxDecimalPrecision();
-
- public abstract int minDecimalPrecision();
-
- public abstract int maxTimestampPrecision();
-
- public abstract int minTimestampPrecision();
-
- /**
- * Defines the unsupported types for the dialect.
- *
- * @return a list of logical type roots.
- */
- public abstract List<LogicalTypeRoot> unsupportedTypes();
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.jdbc.table;
+
+import org.apache.flink.connector.jdbc.dialect.JdbcDialect;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.api.ValidationException;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.DecimalType;
+import org.apache.flink.table.types.logical.LogicalTypeRoot;
+import org.apache.flink.table.types.logical.TimestampType;
+import org.apache.flink.table.types.logical.VarBinaryType;
+
+import java.util.List;
+
+/**
+ * Default JDBC dialects implements for validate.
+ */
+public abstract class AbstractJdbcDialect implements JdbcDialect {
+
+ @Override
+ public void validate(TableSchema schema) throws ValidationException {
+ for (int i = 0; i < schema.getFieldCount(); i++) {
+ DataType dt = schema.getFieldDataType(i).get();
+ String fieldName = schema.getFieldName(i).get();
+
+ // TODO: We can't convert VARBINARY(n) data type to
+ // PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO in
+ // LegacyTypeInfoDataTypeConverter
+ // when n is smaller than Integer.MAX_VALUE
+ if (unsupportedTypes().contains(dt.getLogicalType().getTypeRoot())
+ || (dt.getLogicalType() instanceof VarBinaryType
+ && Integer.MAX_VALUE
+ != ((VarBinaryType) dt.getLogicalType()).getLength())) {
+ throw new ValidationException(
+ String.format(
+ "The %s dialect doesn't support type: %s.",
+ dialectName(), dt.toString()));
+ }
+
+ // only validate precision of DECIMAL type for blink planner
+ if (dt.getLogicalType() instanceof DecimalType) {
+ int precision = ((DecimalType) dt.getLogicalType()).getPrecision();
+ if (precision > maxDecimalPrecision() || precision < minDecimalPrecision()) {
+ throw new ValidationException(
+ String.format(
+ "The precision of field '%s' is out of the DECIMAL "
+ + "precision range [%d, %d] supported by %s dialect.",
+ fieldName,
+ minDecimalPrecision(),
+ maxDecimalPrecision(),
+ dialectName()));
+ }
+ }
+
+ // only validate precision of DECIMAL type for blink planner
+ if (dt.getLogicalType() instanceof TimestampType) {
+ int precision = ((TimestampType) dt.getLogicalType()).getPrecision();
+ if (precision > maxTimestampPrecision() || precision < minTimestampPrecision()) {
+ throw new ValidationException(
+ String.format(
+ "The precision of field '%s' is out of the TIMESTAMP "
+ + "precision range [%d, %d] supported by %s dialect.",
+ fieldName,
+ minTimestampPrecision(),
+ maxTimestampPrecision(),
+ dialectName()));
+ }
+ }
+ }
+ }
+
+ public abstract int maxDecimalPrecision();
+
+ public abstract int minDecimalPrecision();
+
+ public abstract int maxTimestampPrecision();
+
+ public abstract int minTimestampPrecision();
+
+ /**
+ * Defines the unsupported types for the dialect.
+ *
+ * @return a list of logical type roots.
+ */
+ public abstract List<LogicalTypeRoot> unsupportedTypes();
+}
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/JdbcDialects.java b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/JdbcDialects.java
similarity index 88%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/JdbcDialects.java
rename to inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/JdbcDialects.java
index 326798211..d283a9e7e 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/JdbcDialects.java
+++ b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/JdbcDialects.java
@@ -1,60 +1,64 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.flink.jdbc.table;
-
-import org.apache.flink.connector.jdbc.dialect.JdbcDialect;
-import org.apache.flink.connector.jdbc.dialect.MySQLDialect;
-import org.apache.flink.connector.jdbc.dialect.PostgresDialect;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-
-/**
- * Default JDBC dialects.
- */
-public final class JdbcDialects {
-
- private static final List<JdbcDialect> DIALECTS = new ArrayList<>();
-
- static {
- DIALECTS.add(new MySQLDialect());
- DIALECTS.add(new PostgresDialect());
- }
-
- /** Fetch the JdbcDialect class corresponding to a given database url. */
- public static Optional<JdbcDialect> get(String url) {
- for (JdbcDialect dialect : DIALECTS) {
- if (dialect.canHandle(url)) {
- return Optional.of(dialect);
- }
- }
- return Optional.empty();
- }
-
- /** Fetch the JdbcDialect class corresponding to a given database url. */
- public static void register(String dialectImpl) {
- try {
- JdbcDialect dialect = (JdbcDialect) Class.forName(dialectImpl).newInstance();
- DIALECTS.add(dialect);
- } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
- throw new IllegalArgumentException("Cannot register such dialect impl: " + dialectImpl, e);
- }
- }
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.jdbc.table;
+
+import org.apache.flink.connector.jdbc.dialect.JdbcDialect;
+import org.apache.flink.connector.jdbc.dialect.MySQLDialect;
+import org.apache.flink.connector.jdbc.dialect.PostgresDialect;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Default JDBC dialects.
+ */
+public final class JdbcDialects {
+
+ private static final List<JdbcDialect> DIALECTS = new ArrayList<>();
+
+ static {
+ DIALECTS.add(new MySQLDialect());
+ DIALECTS.add(new PostgresDialect());
+ }
+
+ /**
+ * Fetch the JdbcDialect class corresponding to a given database url.
+ */
+ public static Optional<JdbcDialect> get(String url) {
+ for (JdbcDialect dialect : DIALECTS) {
+ if (dialect.canHandle(url)) {
+ return Optional.of(dialect);
+ }
+ }
+ return Optional.empty();
+ }
+
+ /**
+ * Register a custom JdbcDialect implementation by its fully-qualified class name.
+ */
+ public static void register(String dialectImpl) {
+ try {
+ JdbcDialect dialect = (JdbcDialect) Class.forName(dialectImpl).newInstance();
+ DIALECTS.add(dialect);
+ } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
+ throw new IllegalArgumentException("Cannot register such dialect impl: " + dialectImpl, e);
+ }
+ }
}
\ No newline at end of file
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/JdbcDynamicTableFactory.java b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/JdbcDynamicTableFactory.java
similarity index 96%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/JdbcDynamicTableFactory.java
rename to inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/JdbcDynamicTableFactory.java
index 896f186d2..006550ba1 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/jdbc/table/JdbcDynamicTableFactory.java
+++ b/inlong-sort/sort-connectors/jdbc/src/main/java/org/apache/inlong/sort/jdbc/table/JdbcDynamicTableFactory.java
@@ -1,384 +1,383 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.sort.flink.jdbc.table;
-
-import org.apache.flink.configuration.ConfigOption;
-import org.apache.flink.configuration.ConfigOptions;
-import org.apache.flink.configuration.ReadableConfig;
-import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
-import org.apache.flink.connector.jdbc.dialect.JdbcDialect;
-import org.apache.flink.connector.jdbc.internal.options.JdbcDmlOptions;
-import org.apache.flink.connector.jdbc.internal.options.JdbcLookupOptions;
-import org.apache.flink.connector.jdbc.internal.options.JdbcOptions;
-import org.apache.flink.connector.jdbc.internal.options.JdbcReadOptions;
-import org.apache.flink.connector.jdbc.table.JdbcDynamicTableSink;
-import org.apache.flink.connector.jdbc.table.JdbcDynamicTableSource;
-import org.apache.flink.table.api.TableSchema;
-import org.apache.flink.table.connector.sink.DynamicTableSink;
-import org.apache.flink.table.connector.source.DynamicTableSource;
-import org.apache.flink.table.factories.DynamicTableSinkFactory;
-import org.apache.flink.table.factories.DynamicTableSourceFactory;
-import org.apache.flink.table.factories.FactoryUtil;
-import org.apache.flink.table.utils.TableSchemaUtils;
-import org.apache.flink.util.Preconditions;
-
-import java.time.Duration;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Optional;
-import java.util.Set;
-
-import static org.apache.flink.util.Preconditions.checkState;
-
-/**
- * Copy from org.apache.flink:flink-connector-jdbc_2.11:1.13.5
- *
- * Factory for creating configured instances of {@link JdbcDynamicTableSource} and {@link
- * JdbcDynamicTableSink}.We modify it to strengthen capacity of registering other dialect.
- */
-public class JdbcDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
-
- public static final String IDENTIFIER = "jdbc-inlong";
- public static final ConfigOption<String> DIALECT_IMPL =
- ConfigOptions.key("dialect-impl")
- .stringType()
- .noDefaultValue()
- .withDescription("The JDBC Custom Dialect.");
- public static final ConfigOption<String> URL =
- ConfigOptions.key("url")
- .stringType()
- .noDefaultValue()
- .withDescription("The JDBC database URL.");
- public static final ConfigOption<String> TABLE_NAME =
- ConfigOptions.key("table-name")
- .stringType()
- .noDefaultValue()
- .withDescription("The JDBC table name.");
- public static final ConfigOption<String> USERNAME =
- ConfigOptions.key("username")
- .stringType()
- .noDefaultValue()
- .withDescription("The JDBC user name.");
- public static final ConfigOption<String> PASSWORD =
- ConfigOptions.key("password")
- .stringType()
- .noDefaultValue()
- .withDescription("The JDBC password.");
- private static final ConfigOption<String> DRIVER =
- ConfigOptions.key("driver")
- .stringType()
- .noDefaultValue()
- .withDescription(
- "The class name of the JDBC driver to use to connect to this URL. "
- + "If not set, it will automatically be derived from the URL.");
- public static final ConfigOption<Duration> MAX_RETRY_TIMEOUT =
- ConfigOptions.key("connection.max-retry-timeout")
- .durationType()
- .defaultValue(Duration.ofSeconds(60))
- .withDescription("Maximum timeout between retries.");
-
- // read config options
- private static final ConfigOption<String> SCAN_PARTITION_COLUMN =
- ConfigOptions.key("scan.partition.column")
- .stringType()
- .noDefaultValue()
- .withDescription("The column name used for partitioning the input.");
- private static final ConfigOption<Integer> SCAN_PARTITION_NUM =
- ConfigOptions.key("scan.partition.num")
- .intType()
- .noDefaultValue()
- .withDescription("The number of partitions.");
- private static final ConfigOption<Long> SCAN_PARTITION_LOWER_BOUND =
- ConfigOptions.key("scan.partition.lower-bound")
- .longType()
- .noDefaultValue()
- .withDescription("The smallest value of the first partition.");
- private static final ConfigOption<Long> SCAN_PARTITION_UPPER_BOUND =
- ConfigOptions.key("scan.partition.upper-bound")
- .longType()
- .noDefaultValue()
- .withDescription("The largest value of the last partition.");
- private static final ConfigOption<Integer> SCAN_FETCH_SIZE =
- ConfigOptions.key("scan.fetch-size")
- .intType()
- .defaultValue(0)
- .withDescription(
- "Gives the reader a hint as to the number of rows that should be fetched "
- + "from the database per round-trip when reading. "
- + "If the value is zero, this hint is ignored.");
- private static final ConfigOption<Boolean> SCAN_AUTO_COMMIT =
- ConfigOptions.key("scan.auto-commit")
- .booleanType()
- .defaultValue(true)
- .withDescription("Sets whether the driver is in auto-commit mode.");
-
- // look up config options
- private static final ConfigOption<Long> LOOKUP_CACHE_MAX_ROWS =
- ConfigOptions.key("lookup.cache.max-rows")
- .longType()
- .defaultValue(-1L)
- .withDescription(
- "The max number of rows of lookup cache, over this value, the oldest rows will "
- + "be eliminated. \"cache.max-rows\" and \"cache.ttl\""
- + " options must all be specified if any of them is specified.");
- private static final ConfigOption<Duration> LOOKUP_CACHE_TTL =
- ConfigOptions.key("lookup.cache.ttl")
- .durationType()
- .defaultValue(Duration.ofSeconds(10))
- .withDescription("The cache time to live.");
- private static final ConfigOption<Integer> LOOKUP_MAX_RETRIES =
- ConfigOptions.key("lookup.max-retries")
- .intType()
- .defaultValue(3)
- .withDescription("The max retry times if lookup database failed.");
-
- // write config options
- private static final ConfigOption<Integer> SINK_BUFFER_FLUSH_MAX_ROWS =
- ConfigOptions.key("sink.buffer-flush.max-rows")
- .intType()
- .defaultValue(100)
- .withDescription(
- "The flush max size (includes all append, upsert and delete records), over this number"
- + " of records, will flush data.");
- private static final ConfigOption<Duration> SINK_BUFFER_FLUSH_INTERVAL =
- ConfigOptions.key("sink.buffer-flush.interval")
- .durationType()
- .defaultValue(Duration.ofSeconds(1))
- .withDescription(
- "The flush interval mills, over this time, asynchronous threads will flush data.");
- private static final ConfigOption<Integer> SINK_MAX_RETRIES =
- ConfigOptions.key("sink.max-retries")
- .intType()
- .defaultValue(3)
- .withDescription("The max retry times if writing records to database failed.");
-
- @Override
- public DynamicTableSink createDynamicTableSink(Context context) {
- final FactoryUtil.TableFactoryHelper helper =
- FactoryUtil.createTableFactoryHelper(this, context);
- final ReadableConfig config = helper.getOptions();
-
- helper.validate();
- validateConfigOptions(config);
- JdbcOptions jdbcOptions = getJdbcOptions(config);
- TableSchema physicalSchema =
- TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
-
- return new JdbcDynamicTableSink(
- jdbcOptions,
- getJdbcExecutionOptions(config),
- getJdbcDmlOptions(jdbcOptions, physicalSchema),
- physicalSchema);
- }
-
- @Override
- public DynamicTableSource createDynamicTableSource(Context context) {
- final FactoryUtil.TableFactoryHelper helper =
- FactoryUtil.createTableFactoryHelper(this, context);
- final ReadableConfig config = helper.getOptions();
-
- helper.validate();
- validateConfigOptions(config);
- TableSchema physicalSchema =
- TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
- return new JdbcDynamicTableSource(
- getJdbcOptions(helper.getOptions()),
- getJdbcReadOptions(helper.getOptions()),
- getJdbcLookupOptions(helper.getOptions()),
- physicalSchema);
- }
-
- private JdbcOptions getJdbcOptions(ReadableConfig readableConfig) {
- final String url = readableConfig.get(URL);
- final JdbcOptions.Builder builder =
- JdbcOptions.builder()
- .setDBUrl(url)
- .setTableName(readableConfig.get(TABLE_NAME))
- .setDialect(JdbcDialects.get(url).get())
- .setParallelism(
- readableConfig
- .getOptional(FactoryUtil.SINK_PARALLELISM)
- .orElse(null))
- .setConnectionCheckTimeoutSeconds(
- (int) readableConfig.get(MAX_RETRY_TIMEOUT).getSeconds());
-
- readableConfig.getOptional(DRIVER).ifPresent(builder::setDriverName);
- readableConfig.getOptional(USERNAME).ifPresent(builder::setUsername);
- readableConfig.getOptional(PASSWORD).ifPresent(builder::setPassword);
- return builder.build();
- }
-
- private JdbcReadOptions getJdbcReadOptions(ReadableConfig readableConfig) {
- final Optional<String> partitionColumnName =
- readableConfig.getOptional(SCAN_PARTITION_COLUMN);
- final JdbcReadOptions.Builder builder = JdbcReadOptions.builder();
- if (partitionColumnName.isPresent()) {
- builder.setPartitionColumnName(partitionColumnName.get());
- builder.setPartitionLowerBound(readableConfig.get(SCAN_PARTITION_LOWER_BOUND));
- builder.setPartitionUpperBound(readableConfig.get(SCAN_PARTITION_UPPER_BOUND));
- builder.setNumPartitions(readableConfig.get(SCAN_PARTITION_NUM));
- }
- readableConfig.getOptional(SCAN_FETCH_SIZE).ifPresent(builder::setFetchSize);
- builder.setAutoCommit(readableConfig.get(SCAN_AUTO_COMMIT));
- return builder.build();
- }
-
- private JdbcLookupOptions getJdbcLookupOptions(ReadableConfig readableConfig) {
- return new JdbcLookupOptions(
- readableConfig.get(LOOKUP_CACHE_MAX_ROWS),
- readableConfig.get(LOOKUP_CACHE_TTL).toMillis(),
- readableConfig.get(LOOKUP_MAX_RETRIES));
- }
-
- private JdbcExecutionOptions getJdbcExecutionOptions(ReadableConfig config) {
- final JdbcExecutionOptions.Builder builder = new JdbcExecutionOptions.Builder();
- builder.withBatchSize(config.get(SINK_BUFFER_FLUSH_MAX_ROWS));
- builder.withBatchIntervalMs(config.get(SINK_BUFFER_FLUSH_INTERVAL).toMillis());
- builder.withMaxRetries(config.get(SINK_MAX_RETRIES));
- return builder.build();
- }
-
- private JdbcDmlOptions getJdbcDmlOptions(JdbcOptions jdbcOptions, TableSchema schema) {
- String[] keyFields =
- schema.getPrimaryKey()
- .map(pk -> pk.getColumns().toArray(new String[0]))
- .orElse(null);
-
- return JdbcDmlOptions.builder()
- .withTableName(jdbcOptions.getTableName())
- .withDialect(jdbcOptions.getDialect())
- .withFieldNames(schema.getFieldNames())
- .withKeyFields(keyFields)
- .build();
- }
-
- @Override
- public String factoryIdentifier() {
- return IDENTIFIER;
- }
-
- @Override
- public Set<ConfigOption<?>> requiredOptions() {
- Set<ConfigOption<?>> requiredOptions = new HashSet<>();
- requiredOptions.add(URL);
- requiredOptions.add(TABLE_NAME);
- return requiredOptions;
- }
-
- @Override
- public Set<ConfigOption<?>> optionalOptions() {
- Set<ConfigOption<?>> optionalOptions = new HashSet<>();
- optionalOptions.add(DRIVER);
- optionalOptions.add(USERNAME);
- optionalOptions.add(PASSWORD);
- optionalOptions.add(SCAN_PARTITION_COLUMN);
- optionalOptions.add(SCAN_PARTITION_LOWER_BOUND);
- optionalOptions.add(SCAN_PARTITION_UPPER_BOUND);
- optionalOptions.add(SCAN_PARTITION_NUM);
- optionalOptions.add(SCAN_FETCH_SIZE);
- optionalOptions.add(SCAN_AUTO_COMMIT);
- optionalOptions.add(LOOKUP_CACHE_MAX_ROWS);
- optionalOptions.add(LOOKUP_CACHE_TTL);
- optionalOptions.add(LOOKUP_MAX_RETRIES);
- optionalOptions.add(SINK_BUFFER_FLUSH_MAX_ROWS);
- optionalOptions.add(SINK_BUFFER_FLUSH_INTERVAL);
- optionalOptions.add(SINK_MAX_RETRIES);
- optionalOptions.add(FactoryUtil.SINK_PARALLELISM);
- optionalOptions.add(MAX_RETRY_TIMEOUT);
- optionalOptions.add(DIALECT_IMPL);
- return optionalOptions;
- }
-
- private void validateConfigOptions(ReadableConfig config) {
- // register custom dialect first
- config.getOptional(DIALECT_IMPL).ifPresent(JdbcDialects::register);
- String jdbcUrl = config.get(URL);
- final Optional<JdbcDialect> dialect = JdbcDialects.get(jdbcUrl);
- checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + jdbcUrl);
-
- checkAllOrNone(config, new ConfigOption[] {USERNAME, PASSWORD});
-
- checkAllOrNone(
- config,
- new ConfigOption[] {
- SCAN_PARTITION_COLUMN,
- SCAN_PARTITION_NUM,
- SCAN_PARTITION_LOWER_BOUND,
- SCAN_PARTITION_UPPER_BOUND
- });
-
- if (config.getOptional(SCAN_PARTITION_LOWER_BOUND).isPresent()
- && config.getOptional(SCAN_PARTITION_UPPER_BOUND).isPresent()) {
- long lowerBound = config.get(SCAN_PARTITION_LOWER_BOUND);
- long upperBound = config.get(SCAN_PARTITION_UPPER_BOUND);
- if (lowerBound > upperBound) {
- throw new IllegalArgumentException(
- String.format(
- "'%s'='%s' must not be larger than '%s'='%s'.",
- SCAN_PARTITION_LOWER_BOUND.key(),
- lowerBound,
- SCAN_PARTITION_UPPER_BOUND.key(),
- upperBound));
- }
- }
-
- checkAllOrNone(config, new ConfigOption[] {LOOKUP_CACHE_MAX_ROWS, LOOKUP_CACHE_TTL});
-
- if (config.get(LOOKUP_MAX_RETRIES) < 0) {
- throw new IllegalArgumentException(
- String.format(
- "The value of '%s' option shouldn't be negative, but is %s.",
- LOOKUP_MAX_RETRIES.key(), config.get(LOOKUP_MAX_RETRIES)));
- }
-
- if (config.get(SINK_MAX_RETRIES) < 0) {
- throw new IllegalArgumentException(
- String.format(
- "The value of '%s' option shouldn't be negative, but is %s.",
- SINK_MAX_RETRIES.key(), config.get(SINK_MAX_RETRIES)));
- }
-
- if (config.get(MAX_RETRY_TIMEOUT).getSeconds() <= 0) {
- throw new IllegalArgumentException(
- String.format(
- "The value of '%s' option must be in second granularity and shouldn't be "
- + "smaller than 1 second, but is %s.",
- MAX_RETRY_TIMEOUT.key(),
- config.get(
- ConfigOptions.key(MAX_RETRY_TIMEOUT.key())
- .stringType()
- .noDefaultValue())));
- }
- }
-
- private void checkAllOrNone(ReadableConfig config, ConfigOption<?>[] configOptions) {
- int presentCount = 0;
- for (ConfigOption configOption : configOptions) {
- if (config.getOptional(configOption).isPresent()) {
- presentCount++;
- }
- }
- String[] propertyNames =
- Arrays.stream(configOptions).map(ConfigOption::key).toArray(String[]::new);
- Preconditions.checkArgument(
- configOptions.length == presentCount || presentCount == 0,
- "Either all or none of the following options should be provided:\n"
- + String.join("\n", propertyNames));
- }
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.sort.jdbc.table;
+
+import org.apache.flink.configuration.ConfigOption;
+import org.apache.flink.configuration.ConfigOptions;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
+import org.apache.flink.connector.jdbc.dialect.JdbcDialect;
+import org.apache.flink.connector.jdbc.internal.options.JdbcDmlOptions;
+import org.apache.flink.connector.jdbc.internal.options.JdbcLookupOptions;
+import org.apache.flink.connector.jdbc.internal.options.JdbcOptions;
+import org.apache.flink.connector.jdbc.internal.options.JdbcReadOptions;
+import org.apache.flink.connector.jdbc.table.JdbcDynamicTableSink;
+import org.apache.flink.connector.jdbc.table.JdbcDynamicTableSource;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.connector.sink.DynamicTableSink;
+import org.apache.flink.table.connector.source.DynamicTableSource;
+import org.apache.flink.table.factories.DynamicTableSinkFactory;
+import org.apache.flink.table.factories.DynamicTableSourceFactory;
+import org.apache.flink.table.factories.FactoryUtil;
+import org.apache.flink.table.utils.TableSchemaUtils;
+import org.apache.flink.util.Preconditions;
+
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Set;
+
+import static org.apache.flink.util.Preconditions.checkState;
+
+/**
+ * Copy from org.apache.flink:flink-connector-jdbc_2.11:1.13.5
+ * <p>
+ * Factory for creating configured instances of {@link JdbcDynamicTableSource} and {@link
+ * JdbcDynamicTableSink}.We modify it to strengthen capacity of registering other dialect.</p>
+ */
+public class JdbcDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
+
+ public static final String IDENTIFIER = "jdbc-inlong";
+ public static final ConfigOption<String> DIALECT_IMPL =
+ ConfigOptions.key("dialect-impl")
+ .stringType()
+ .noDefaultValue()
+ .withDescription("The JDBC Custom Dialect.");
+ public static final ConfigOption<String> URL =
+ ConfigOptions.key("url")
+ .stringType()
+ .noDefaultValue()
+ .withDescription("The JDBC database URL.");
+ public static final ConfigOption<String> TABLE_NAME =
+ ConfigOptions.key("table-name")
+ .stringType()
+ .noDefaultValue()
+ .withDescription("The JDBC table name.");
+ public static final ConfigOption<String> USERNAME =
+ ConfigOptions.key("username")
+ .stringType()
+ .noDefaultValue()
+ .withDescription("The JDBC user name.");
+ public static final ConfigOption<String> PASSWORD =
+ ConfigOptions.key("password")
+ .stringType()
+ .noDefaultValue()
+ .withDescription("The JDBC password.");
+ public static final ConfigOption<Duration> MAX_RETRY_TIMEOUT =
+ ConfigOptions.key("connection.max-retry-timeout")
+ .durationType()
+ .defaultValue(Duration.ofSeconds(60))
+ .withDescription("Maximum timeout between retries.");
+ private static final ConfigOption<String> DRIVER =
+ ConfigOptions.key("driver")
+ .stringType()
+ .noDefaultValue()
+ .withDescription(
+ "The class name of the JDBC driver to use to connect to this URL. "
+ + "If not set, it will automatically be derived from the URL.");
+ // read config options
+ private static final ConfigOption<String> SCAN_PARTITION_COLUMN =
+ ConfigOptions.key("scan.partition.column")
+ .stringType()
+ .noDefaultValue()
+ .withDescription("The column name used for partitioning the input.");
+ private static final ConfigOption<Integer> SCAN_PARTITION_NUM =
+ ConfigOptions.key("scan.partition.num")
+ .intType()
+ .noDefaultValue()
+ .withDescription("The number of partitions.");
+ private static final ConfigOption<Long> SCAN_PARTITION_LOWER_BOUND =
+ ConfigOptions.key("scan.partition.lower-bound")
+ .longType()
+ .noDefaultValue()
+ .withDescription("The smallest value of the first partition.");
+ private static final ConfigOption<Long> SCAN_PARTITION_UPPER_BOUND =
+ ConfigOptions.key("scan.partition.upper-bound")
+ .longType()
+ .noDefaultValue()
+ .withDescription("The largest value of the last partition.");
+ private static final ConfigOption<Integer> SCAN_FETCH_SIZE =
+ ConfigOptions.key("scan.fetch-size")
+ .intType()
+ .defaultValue(0)
+ .withDescription(
+ "Gives the reader a hint as to the number of rows that should be fetched "
+ + "from the database per round-trip when reading. "
+ + "If the value is zero, this hint is ignored.");
+ private static final ConfigOption<Boolean> SCAN_AUTO_COMMIT =
+ ConfigOptions.key("scan.auto-commit")
+ .booleanType()
+ .defaultValue(true)
+ .withDescription("Sets whether the driver is in auto-commit mode.");
+
+ // look up config options
+ private static final ConfigOption<Long> LOOKUP_CACHE_MAX_ROWS =
+ ConfigOptions.key("lookup.cache.max-rows")
+ .longType()
+ .defaultValue(-1L)
+ .withDescription(
+ "The max number of rows of lookup cache, over this value, the oldest rows will "
+ + "be eliminated. \"cache.max-rows\" and \"cache.ttl\""
+ + " options must all be specified if any of them is specified.");
+ private static final ConfigOption<Duration> LOOKUP_CACHE_TTL =
+ ConfigOptions.key("lookup.cache.ttl")
+ .durationType()
+ .defaultValue(Duration.ofSeconds(10))
+ .withDescription("The cache time to live.");
+ private static final ConfigOption<Integer> LOOKUP_MAX_RETRIES =
+ ConfigOptions.key("lookup.max-retries")
+ .intType()
+ .defaultValue(3)
+ .withDescription("The max retry times if lookup database failed.");
+
+ // write config options
+ private static final ConfigOption<Integer> SINK_BUFFER_FLUSH_MAX_ROWS =
+ ConfigOptions.key("sink.buffer-flush.max-rows")
+ .intType()
+ .defaultValue(100)
+ .withDescription(
+ "The flush max size (includes all append, upsert and delete records), over this number"
+ + " of records, will flush data.");
+ private static final ConfigOption<Duration> SINK_BUFFER_FLUSH_INTERVAL =
+ ConfigOptions.key("sink.buffer-flush.interval")
+ .durationType()
+ .defaultValue(Duration.ofSeconds(1))
+ .withDescription(
+ "The flush interval mills, over this time, asynchronous threads will flush data.");
+ private static final ConfigOption<Integer> SINK_MAX_RETRIES =
+ ConfigOptions.key("sink.max-retries")
+ .intType()
+ .defaultValue(3)
+ .withDescription("The max retry times if writing records to database failed.");
+
+ @Override
+ public DynamicTableSink createDynamicTableSink(Context context) {
+ final FactoryUtil.TableFactoryHelper helper =
+ FactoryUtil.createTableFactoryHelper(this, context);
+ final ReadableConfig config = helper.getOptions();
+
+ helper.validate();
+ validateConfigOptions(config);
+ JdbcOptions jdbcOptions = getJdbcOptions(config);
+ TableSchema physicalSchema =
+ TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
+
+ return new JdbcDynamicTableSink(
+ jdbcOptions,
+ getJdbcExecutionOptions(config),
+ getJdbcDmlOptions(jdbcOptions, physicalSchema),
+ physicalSchema);
+ }
+
+ @Override
+ public DynamicTableSource createDynamicTableSource(Context context) {
+ final FactoryUtil.TableFactoryHelper helper =
+ FactoryUtil.createTableFactoryHelper(this, context);
+ final ReadableConfig config = helper.getOptions();
+
+ helper.validate();
+ validateConfigOptions(config);
+ TableSchema physicalSchema =
+ TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
+ return new JdbcDynamicTableSource(
+ getJdbcOptions(helper.getOptions()),
+ getJdbcReadOptions(helper.getOptions()),
+ getJdbcLookupOptions(helper.getOptions()),
+ physicalSchema);
+ }
+
+ private JdbcOptions getJdbcOptions(ReadableConfig readableConfig) {
+ final String url = readableConfig.get(URL);
+ final JdbcOptions.Builder builder =
+ JdbcOptions.builder()
+ .setDBUrl(url)
+ .setTableName(readableConfig.get(TABLE_NAME))
+ .setDialect(JdbcDialects.get(url).get())
+ .setParallelism(
+ readableConfig
+ .getOptional(FactoryUtil.SINK_PARALLELISM)
+ .orElse(null))
+ .setConnectionCheckTimeoutSeconds(
+ (int) readableConfig.get(MAX_RETRY_TIMEOUT).getSeconds());
+
+ readableConfig.getOptional(DRIVER).ifPresent(builder::setDriverName);
+ readableConfig.getOptional(USERNAME).ifPresent(builder::setUsername);
+ readableConfig.getOptional(PASSWORD).ifPresent(builder::setPassword);
+ return builder.build();
+ }
+
+ private JdbcReadOptions getJdbcReadOptions(ReadableConfig readableConfig) {
+ final Optional<String> partitionColumnName =
+ readableConfig.getOptional(SCAN_PARTITION_COLUMN);
+ final JdbcReadOptions.Builder builder = JdbcReadOptions.builder();
+ if (partitionColumnName.isPresent()) {
+ builder.setPartitionColumnName(partitionColumnName.get());
+ builder.setPartitionLowerBound(readableConfig.get(SCAN_PARTITION_LOWER_BOUND));
+ builder.setPartitionUpperBound(readableConfig.get(SCAN_PARTITION_UPPER_BOUND));
+ builder.setNumPartitions(readableConfig.get(SCAN_PARTITION_NUM));
+ }
+ readableConfig.getOptional(SCAN_FETCH_SIZE).ifPresent(builder::setFetchSize);
+ builder.setAutoCommit(readableConfig.get(SCAN_AUTO_COMMIT));
+ return builder.build();
+ }
+
+ private JdbcLookupOptions getJdbcLookupOptions(ReadableConfig readableConfig) {
+ return new JdbcLookupOptions(
+ readableConfig.get(LOOKUP_CACHE_MAX_ROWS),
+ readableConfig.get(LOOKUP_CACHE_TTL).toMillis(),
+ readableConfig.get(LOOKUP_MAX_RETRIES));
+ }
+
+ private JdbcExecutionOptions getJdbcExecutionOptions(ReadableConfig config) {
+ final JdbcExecutionOptions.Builder builder = new JdbcExecutionOptions.Builder();
+ builder.withBatchSize(config.get(SINK_BUFFER_FLUSH_MAX_ROWS));
+ builder.withBatchIntervalMs(config.get(SINK_BUFFER_FLUSH_INTERVAL).toMillis());
+ builder.withMaxRetries(config.get(SINK_MAX_RETRIES));
+ return builder.build();
+ }
+
+ private JdbcDmlOptions getJdbcDmlOptions(JdbcOptions jdbcOptions, TableSchema schema) {
+ String[] keyFields =
+ schema.getPrimaryKey()
+ .map(pk -> pk.getColumns().toArray(new String[0]))
+ .orElse(null);
+
+ return JdbcDmlOptions.builder()
+ .withTableName(jdbcOptions.getTableName())
+ .withDialect(jdbcOptions.getDialect())
+ .withFieldNames(schema.getFieldNames())
+ .withKeyFields(keyFields)
+ .build();
+ }
+
+ @Override
+ public String factoryIdentifier() {
+ return IDENTIFIER;
+ }
+
+ @Override
+ public Set<ConfigOption<?>> requiredOptions() {
+ Set<ConfigOption<?>> requiredOptions = new HashSet<>();
+ requiredOptions.add(URL);
+ requiredOptions.add(TABLE_NAME);
+ return requiredOptions;
+ }
+
+ @Override
+ public Set<ConfigOption<?>> optionalOptions() {
+ Set<ConfigOption<?>> optionalOptions = new HashSet<>();
+ optionalOptions.add(DRIVER);
+ optionalOptions.add(USERNAME);
+ optionalOptions.add(PASSWORD);
+ optionalOptions.add(SCAN_PARTITION_COLUMN);
+ optionalOptions.add(SCAN_PARTITION_LOWER_BOUND);
+ optionalOptions.add(SCAN_PARTITION_UPPER_BOUND);
+ optionalOptions.add(SCAN_PARTITION_NUM);
+ optionalOptions.add(SCAN_FETCH_SIZE);
+ optionalOptions.add(SCAN_AUTO_COMMIT);
+ optionalOptions.add(LOOKUP_CACHE_MAX_ROWS);
+ optionalOptions.add(LOOKUP_CACHE_TTL);
+ optionalOptions.add(LOOKUP_MAX_RETRIES);
+ optionalOptions.add(SINK_BUFFER_FLUSH_MAX_ROWS);
+ optionalOptions.add(SINK_BUFFER_FLUSH_INTERVAL);
+ optionalOptions.add(SINK_MAX_RETRIES);
+ optionalOptions.add(FactoryUtil.SINK_PARALLELISM);
+ optionalOptions.add(MAX_RETRY_TIMEOUT);
+ optionalOptions.add(DIALECT_IMPL);
+ return optionalOptions;
+ }
+
+ private void validateConfigOptions(ReadableConfig config) {
+ // register custom dialect first
+ config.getOptional(DIALECT_IMPL).ifPresent(JdbcDialects::register);
+ String jdbcUrl = config.get(URL);
+ final Optional<JdbcDialect> dialect = JdbcDialects.get(jdbcUrl);
+ checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + jdbcUrl);
+
+ checkAllOrNone(config, new ConfigOption[]{USERNAME, PASSWORD});
+
+ checkAllOrNone(
+ config,
+ new ConfigOption[]{
+ SCAN_PARTITION_COLUMN,
+ SCAN_PARTITION_NUM,
+ SCAN_PARTITION_LOWER_BOUND,
+ SCAN_PARTITION_UPPER_BOUND
+ });
+
+ if (config.getOptional(SCAN_PARTITION_LOWER_BOUND).isPresent()
+ && config.getOptional(SCAN_PARTITION_UPPER_BOUND).isPresent()) {
+ long lowerBound = config.get(SCAN_PARTITION_LOWER_BOUND);
+ long upperBound = config.get(SCAN_PARTITION_UPPER_BOUND);
+ if (lowerBound > upperBound) {
+ throw new IllegalArgumentException(
+ String.format(
+ "'%s'='%s' must not be larger than '%s'='%s'.",
+ SCAN_PARTITION_LOWER_BOUND.key(),
+ lowerBound,
+ SCAN_PARTITION_UPPER_BOUND.key(),
+ upperBound));
+ }
+ }
+
+ checkAllOrNone(config, new ConfigOption[]{LOOKUP_CACHE_MAX_ROWS, LOOKUP_CACHE_TTL});
+
+ if (config.get(LOOKUP_MAX_RETRIES) < 0) {
+ throw new IllegalArgumentException(
+ String.format(
+ "The value of '%s' option shouldn't be negative, but is %s.",
+ LOOKUP_MAX_RETRIES.key(), config.get(LOOKUP_MAX_RETRIES)));
+ }
+
+ if (config.get(SINK_MAX_RETRIES) < 0) {
+ throw new IllegalArgumentException(
+ String.format(
+ "The value of '%s' option shouldn't be negative, but is %s.",
+ SINK_MAX_RETRIES.key(), config.get(SINK_MAX_RETRIES)));
+ }
+
+ if (config.get(MAX_RETRY_TIMEOUT).getSeconds() <= 0) {
+ throw new IllegalArgumentException(
+ String.format(
+ "The value of '%s' option must be in second granularity and shouldn't be "
+ + "smaller than 1 second, but is %s.",
+ MAX_RETRY_TIMEOUT.key(),
+ config.get(
+ ConfigOptions.key(MAX_RETRY_TIMEOUT.key())
+ .stringType()
+ .noDefaultValue())));
+ }
+ }
+
+ private void checkAllOrNone(ReadableConfig config, ConfigOption<?>[] configOptions) {
+ int presentCount = 0;
+ for (ConfigOption configOption : configOptions) {
+ if (config.getOptional(configOption).isPresent()) {
+ presentCount++;
+ }
+ }
+ String[] propertyNames =
+ Arrays.stream(configOptions).map(ConfigOption::key).toArray(String[]::new);
+ Preconditions.checkArgument(
+ configOptions.length == presentCount || presentCount == 0,
+ "Either all or none of the following options should be provided:\n"
+ + String.join("\n", propertyNames));
+ }
+}
diff --git a/inlong-sort/sort-connectors/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/inlong-sort/sort-connectors/jdbc/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
similarity index 78%
copy from inlong-sort/sort-connectors/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
copy to inlong-sort/sort-connectors/jdbc/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
index e4286c7ff..a14e9cc44 100644
--- a/inlong-sort/sort-connectors/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
+++ b/inlong-sort/sort-connectors/jdbc/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
@@ -1,18 +1,16 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.inlong.sort.flink.kafka.KafkaDynamicTableFactory
-org.apache.inlong.sort.flink.jdbc.table.JdbcDynamicTableFactory
-org.apache.inlong.sort.flink.pulsar.table.PulsarDynamicTableFactory
\ No newline at end of file
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.inlong.sort.jdbc.table.JdbcDynamicTableFactory
\ No newline at end of file
diff --git a/inlong-sort/sort-connectors/kafka/pom.xml b/inlong-sort/sort-connectors/kafka/pom.xml
new file mode 100644
index 000000000..e1cf1620b
--- /dev/null
+++ b/inlong-sort/sort-connectors/kafka/pom.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sort-connectors</artifactId>
+ <groupId>org.apache.inlong</groupId>
+ <version>1.2.0-incubating-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>sort-connector-kafka</artifactId>
+ <name>Apache InLong - Sort-connector-kafka</name>
+ <packaging>jar</packaging>
+
+</project>
\ No newline at end of file
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/DynamicKafkaSerializationSchema.java b/inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/DynamicKafkaSerializationSchema.java
similarity index 95%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/DynamicKafkaSerializationSchema.java
rename to inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/DynamicKafkaSerializationSchema.java
index be3f26487..dd50a7fc1 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/DynamicKafkaSerializationSchema.java
+++ b/inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/DynamicKafkaSerializationSchema.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.flink.kafka;
+package org.apache.inlong.sort.kafka;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.streaming.connectors.kafka.KafkaContextAware;
@@ -25,7 +25,7 @@ import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.Preconditions;
-import org.apache.inlong.sort.flink.kafka.KafkaDynamicSink.WritableMetadata;
+import org.apache.inlong.sort.kafka.KafkaDynamicSink.WritableMetadata;
import org.apache.kafka.clients.producer.ProducerRecord;
import javax.annotation.Nullable;
@@ -93,6 +93,16 @@ class DynamicKafkaSerializationSchema
this.upsertMode = upsertMode;
}
+ static RowData createProjectedRow(
+ RowData consumedRow, RowKind kind, RowData.FieldGetter[] fieldGetters) {
+ final int arity = fieldGetters.length;
+ final GenericRowData genericRowData = new GenericRowData(kind, arity);
+ for (int fieldPos = 0; fieldPos < arity; fieldPos++) {
+ genericRowData.setField(fieldPos, fieldGetters[fieldPos].getFieldOrNull(consumedRow));
+ }
+ return genericRowData;
+ }
+
@Override
public void open(SerializationSchema.InitializationContext context) throws Exception {
if (keySerialization != null) {
@@ -143,10 +153,10 @@ class DynamicKafkaSerializationSchema
return new ProducerRecord<>(
topic,
extractPartition(consumedRow, keySerialized, valueSerialized),
- readMetadata(consumedRow, WritableMetadata.TIMESTAMP),
+ readMetadata(consumedRow, KafkaDynamicSink.WritableMetadata.TIMESTAMP),
keySerialized,
valueSerialized,
- readMetadata(consumedRow, WritableMetadata.HEADERS));
+ readMetadata(consumedRow, KafkaDynamicSink.WritableMetadata.HEADERS));
}
@Override
@@ -170,7 +180,7 @@ class DynamicKafkaSerializationSchema
}
@SuppressWarnings("unchecked")
- private <T> T readMetadata(RowData consumedRow, WritableMetadata metadata) {
+ private <T> T readMetadata(RowData consumedRow, KafkaDynamicSink.WritableMetadata metadata) {
final int pos = metadataPositions[metadata.ordinal()];
if (pos < 0) {
return null;
@@ -187,16 +197,6 @@ class DynamicKafkaSerializationSchema
return null;
}
- static RowData createProjectedRow(
- RowData consumedRow, RowKind kind, RowData.FieldGetter[] fieldGetters) {
- final int arity = fieldGetters.length;
- final GenericRowData genericRowData = new GenericRowData(kind, arity);
- for (int fieldPos = 0; fieldPos < arity; fieldPos++) {
- genericRowData.setField(fieldPos, fieldGetters[fieldPos].getFieldOrNull(consumedRow));
- }
- return genericRowData;
- }
-
// --------------------------------------------------------------------------------------------
interface MetadataConverter extends Serializable {
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/KafkaDynamicSink.java b/inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/KafkaDynamicSink.java
similarity index 93%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/KafkaDynamicSink.java
rename to inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/KafkaDynamicSink.java
index 5509de45c..385609ead 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/KafkaDynamicSink.java
+++ b/inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/KafkaDynamicSink.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.flink.kafka;
+package org.apache.inlong.sort.kafka;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.SerializationSchema;
@@ -37,12 +37,10 @@ import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.utils.DataTypeUtils;
-
-import org.apache.inlong.sort.flink.kafka.DynamicKafkaSerializationSchema.MetadataConverter;
+import org.apache.inlong.sort.kafka.DynamicKafkaSerializationSchema.MetadataConverter;
import org.apache.kafka.common.header.Header;
import javax.annotation.Nullable;
-
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -56,7 +54,9 @@ import java.util.stream.Stream;
import static org.apache.flink.util.Preconditions.checkNotNull;
-/** A version-agnostic Kafka {@link DynamicTableSink}. */
+/**
+ * A version-agnostic Kafka {@link DynamicTableSink}.
+ */
@Internal
public class KafkaDynamicSink implements DynamicTableSink, SupportsWritingMetadata {
@@ -64,62 +64,75 @@ public class KafkaDynamicSink implements DynamicTableSink, SupportsWritingMetada
// Mutable attributes
// --------------------------------------------------------------------------------------------
- /** Metadata that is appended at the end of a physical sink row. */
- protected List<String> metadataKeys;
-
// --------------------------------------------------------------------------------------------
// Format attributes
// --------------------------------------------------------------------------------------------
private static final String VALUE_METADATA_PREFIX = "value.";
-
- /** Data type of consumed data type. */
- protected DataType consumedDataType;
-
- /** Data type to configure the formats. */
+ /**
+ * Data type to configure the formats.
+ */
protected final DataType physicalDataType;
-
- /** Optional format for encoding keys to Kafka. */
+ /**
+ * Optional format for encoding keys to Kafka.
+ */
protected final @Nullable EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat;
-
- /** Format for encoding values to Kafka. */
+ /**
+ * Format for encoding values to Kafka.
+ */
protected final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat;
-
- /** Indices that determine the key fields and the source position in the consumed row. */
+ /**
+ * Indices that determine the key fields and the source position in the consumed row.
+ */
protected final int[] keyProjection;
-
- /** Indices that determine the value fields and the source position in the consumed row. */
+ /**
+ * Indices that determine the value fields and the source position in the consumed row.
+ */
protected final int[] valueProjection;
-
- /** Prefix that needs to be removed from fields when constructing the physical data type. */
+ /**
+ * Prefix that needs to be removed from fields when constructing the physical data type.
+ */
protected final @Nullable String keyPrefix;
+ /**
+ * The Kafka topic to write to.
+ */
+ protected final String topic;
+ /**
+ * Properties for the Kafka producer.
+ */
+ protected final Properties properties;
// --------------------------------------------------------------------------------------------
// Kafka-specific attributes
// --------------------------------------------------------------------------------------------
-
- /** The Kafka topic to write to. */
- protected final String topic;
-
- /** Properties for the Kafka producer. */
- protected final Properties properties;
-
- /** Partitioner to select Kafka partition for each item. */
+ /**
+ * Partitioner to select Kafka partition for each item.
+ */
protected final @Nullable FlinkKafkaPartitioner<RowData> partitioner;
-
- /** Sink commit semantic. */
+ /**
+ * Sink commit semantic.
+ */
protected final KafkaSinkSemantic semantic;
-
/**
* Flag to determine sink mode. In upsert mode sink transforms the delete/update-before message
* to tombstone message.
*/
protected final boolean upsertMode;
-
- /** Sink buffer flush config which only supported in upsert mode now. */
+ /**
+ * Sink buffer flush config which only supported in upsert mode now.
+ */
protected final SinkBufferFlushMode flushMode;
-
- /** Parallelism of the physical Kafka producer. * */
+ /**
+ * Parallelism of the physical Kafka producer. *
+ */
protected final @Nullable Integer parallelism;
+ /**
+ * Metadata that is appended at the end of a physical sink row.
+ */
+ protected List<String> metadataKeys;
+ /**
+ * Data type of consumed data type.
+ */
+ protected DataType consumedDataType;
/**
* Constructor of KafkaDynamicSink.
@@ -372,7 +385,7 @@ public class KafkaDynamicSink implements DynamicTableSink, SupportsWritingMetada
}
private @Nullable SerializationSchema<RowData> createSerialization(
- DynamicTableSink.Context context,
+ Context context,
@Nullable EncodingFormat<SerializationSchema<RowData>> format,
int[] projection,
@Nullable String prefix) {
diff --git a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/KafkaDynamicTableFactory.java b/inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/table/KafkaDynamicTableFactory.java
similarity index 98%
rename from inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/KafkaDynamicTableFactory.java
rename to inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/table/KafkaDynamicTableFactory.java
index 0f74f04e0..ea41c312f 100644
--- a/inlong-sort/sort-connectors/src/main/java/org/apache/inlong/sort/flink/kafka/KafkaDynamicTableFactory.java
+++ b/inlong-sort/sort-connectors/kafka/src/main/java/org/apache/inlong/sort/kafka/table/KafkaDynamicTableFactory.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.flink.kafka;
+package org.apache.inlong.sort.kafka.table;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.common.serialization.DeserializationSchema;
@@ -48,9 +48,9 @@ import org.apache.flink.table.factories.FactoryUtil.TableFactoryHelper;
import org.apache.flink.table.factories.SerializationFormatFactory;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.RowKind;
+import org.apache.inlong.sort.kafka.KafkaDynamicSink;
import javax.annotation.Nullable;
-
import java.time.Duration;
import java.util.HashSet;
import java.util.List;
@@ -89,10 +89,10 @@ import static org.apache.flink.streaming.connectors.kafka.table.KafkaOptions.val
import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM;
/**
- * Copy from org.apache.flink:flink-connector-kafka_2.11:1.13.5
- *
+ * Copy from org.apache.flink:flink-connector-kafka_2.11:1.13.5
+ * <p>
* Factory for creating configured instances of {@link KafkaDynamicSource} and {@link
- * KafkaDynamicSink}.We modify KafkaDynamicTableSink to support format metadata writeable.
+ * KafkaDynamicSink}. We modify KafkaDynamicTableSink to support format metadata writeable.</p>
*/
@Internal
public class KafkaDynamicTableFactory
@@ -100,6 +100,80 @@ public class KafkaDynamicTableFactory
public static final String IDENTIFIER = "kafka-inlong";
+ private static Optional<DecodingFormat<DeserializationSchema<RowData>>> getKeyDecodingFormat(
+ TableFactoryHelper helper) {
+ final Optional<DecodingFormat<DeserializationSchema<RowData>>> keyDecodingFormat =
+ helper.discoverOptionalDecodingFormat(
+ DeserializationFormatFactory.class, KEY_FORMAT);
+ keyDecodingFormat.ifPresent(
+ format -> {
+ if (!format.getChangelogMode().containsOnly(RowKind.INSERT)) {
+ throw new ValidationException(
+ String.format(
+ "A key format should only deal with INSERT-only records. "
+ + "But %s has a changelog mode of %s.",
+ helper.getOptions().get(KEY_FORMAT),
+ format.getChangelogMode()));
+ }
+ });
+ return keyDecodingFormat;
+ }
+
+ private static Optional<EncodingFormat<SerializationSchema<RowData>>> getKeyEncodingFormat(
+ TableFactoryHelper helper) {
+ final Optional<EncodingFormat<SerializationSchema<RowData>>> keyEncodingFormat =
+ helper.discoverOptionalEncodingFormat(SerializationFormatFactory.class, KEY_FORMAT);
+ keyEncodingFormat.ifPresent(
+ format -> {
+ if (!format.getChangelogMode().containsOnly(RowKind.INSERT)) {
+ throw new ValidationException(
+ String.format(
+ "A key format should only deal with INSERT-only records. "
+ + "But %s has a changelog mode of %s.",
+ helper.getOptions().get(KEY_FORMAT),
+ format.getChangelogMode()));
+ }
+ });
+ return keyEncodingFormat;
+ }
+
+ private static DecodingFormat<DeserializationSchema<RowData>> getValueDecodingFormat(
+ TableFactoryHelper helper) {
+ return helper.discoverOptionalDecodingFormat(
+ DeserializationFormatFactory.class, FactoryUtil.FORMAT)
+ .orElseGet(
+ () ->
+ helper.discoverDecodingFormat(
+ DeserializationFormatFactory.class, VALUE_FORMAT));
+ }
+
+ private static EncodingFormat<SerializationSchema<RowData>> getValueEncodingFormat(
+ TableFactoryHelper helper) {
+ return helper.discoverOptionalEncodingFormat(
+ SerializationFormatFactory.class, FactoryUtil.FORMAT)
+ .orElseGet(
+ () ->
+ helper.discoverEncodingFormat(
+ SerializationFormatFactory.class, VALUE_FORMAT));
+ }
+
+ private static void validatePKConstraints(
+ ObjectIdentifier tableName, CatalogTable catalogTable, Format format) {
+ if (catalogTable.getSchema().getPrimaryKey().isPresent()
+ && format.getChangelogMode().containsOnly(RowKind.INSERT)) {
+ Configuration options = Configuration.fromMap(catalogTable.getOptions());
+ String formatName =
+ options.getOptional(FactoryUtil.FORMAT).orElse(options.get(VALUE_FORMAT));
+ throw new ValidationException(
+ String.format(
+ "The Kafka table '%s' with '%s' format doesn't support defining PRIMARY KEY constraint"
+ + " on the table, because it can't guarantee the semantic of primary key.",
+ tableName.asSummaryString(), formatName));
+ }
+ }
+
+ // --------------------------------------------------------------------------------------------
+
@Override
public String factoryIdentifier() {
return IDENTIFIER;
@@ -238,80 +312,6 @@ public class KafkaDynamicTableFactory
// --------------------------------------------------------------------------------------------
- private static Optional<DecodingFormat<DeserializationSchema<RowData>>> getKeyDecodingFormat(
- TableFactoryHelper helper) {
- final Optional<DecodingFormat<DeserializationSchema<RowData>>> keyDecodingFormat =
- helper.discoverOptionalDecodingFormat(
- DeserializationFormatFactory.class, KEY_FORMAT);
- keyDecodingFormat.ifPresent(
- format -> {
- if (!format.getChangelogMode().containsOnly(RowKind.INSERT)) {
- throw new ValidationException(
- String.format(
- "A key format should only deal with INSERT-only records. "
- + "But %s has a changelog mode of %s.",
- helper.getOptions().get(KEY_FORMAT),
- format.getChangelogMode()));
- }
- });
- return keyDecodingFormat;
- }
-
- private static Optional<EncodingFormat<SerializationSchema<RowData>>> getKeyEncodingFormat(
- TableFactoryHelper helper) {
- final Optional<EncodingFormat<SerializationSchema<RowData>>> keyEncodingFormat =
- helper.discoverOptionalEncodingFormat(SerializationFormatFactory.class, KEY_FORMAT);
- keyEncodingFormat.ifPresent(
- format -> {
- if (!format.getChangelogMode().containsOnly(RowKind.INSERT)) {
- throw new ValidationException(
- String.format(
- "A key format should only deal with INSERT-only records. "
- + "But %s has a changelog mode of %s.",
- helper.getOptions().get(KEY_FORMAT),
- format.getChangelogMode()));
- }
- });
- return keyEncodingFormat;
- }
-
- private static DecodingFormat<DeserializationSchema<RowData>> getValueDecodingFormat(
- TableFactoryHelper helper) {
- return helper.discoverOptionalDecodingFormat(
- DeserializationFormatFactory.class, FactoryUtil.FORMAT)
- .orElseGet(
- () ->
- helper.discoverDecodingFormat(
- DeserializationFormatFactory.class, VALUE_FORMAT));
- }
-
- private static EncodingFormat<SerializationSchema<RowData>> getValueEncodingFormat(
- TableFactoryHelper helper) {
- return helper.discoverOptionalEncodingFormat(
- SerializationFormatFactory.class, FactoryUtil.FORMAT)
- .orElseGet(
- () ->
- helper.discoverEncodingFormat(
- SerializationFormatFactory.class, VALUE_FORMAT));
- }
-
- private static void validatePKConstraints(
- ObjectIdentifier tableName, CatalogTable catalogTable, Format format) {
- if (catalogTable.getSchema().getPrimaryKey().isPresent()
- && format.getChangelogMode().containsOnly(RowKind.INSERT)) {
- Configuration options = Configuration.fromMap(catalogTable.getOptions());
- String formatName =
- options.getOptional(FactoryUtil.FORMAT).orElse(options.get(VALUE_FORMAT));
- throw new ValidationException(
- String.format(
- "The Kafka table '%s' with '%s' format doesn't support defining PRIMARY KEY constraint"
- + " on the table, because it can't guarantee the semantic of primary key.",
- tableName.asSummaryString(), formatName));
- }
- }
-
- // --------------------------------------------------------------------------------------------
-
protected KafkaDynamicSource createKafkaTableSource(
DataType physicalDataType,
@Nullable DecodingFormat<DeserializationSchema<RowData>> keyDecodingFormat,
diff --git a/inlong-sort/sort-connectors/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/inlong-sort/sort-connectors/kafka/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
similarity index 78%
rename from inlong-sort/sort-connectors/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
rename to inlong-sort/sort-connectors/kafka/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
index e4286c7ff..543f247b3 100644
--- a/inlong-sort/sort-connectors/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
+++ b/inlong-sort/sort-connectors/kafka/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
@@ -1,18 +1,16 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.inlong.sort.flink.kafka.KafkaDynamicTableFactory
-org.apache.inlong.sort.flink.jdbc.table.JdbcDynamicTableFactory
-org.apache.inlong.sort.flink.pulsar.table.PulsarDynamicTableFactory
\ No newline at end of file
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.inlong.sort.kafka.table.KafkaDynamicTableFactory
\ No newline at end of file
diff --git a/inlong-sort/sort-connectors/mysql-cdc/pom.xml b/inlong-sort/sort-connectors/mysql-cdc/pom.xml
new file mode 100644
index 000000000..d99ec1f8a
--- /dev/null
+++ b/inlong-sort/sort-connectors/mysql-cdc/pom.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sort-connectors</artifactId>
+ <groupId>org.apache.inlong</groupId>
+ <version>1.2.0-incubating-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>sort-connector-mysql-cdc</artifactId>
+ <name>Apache InLong - Sort-connector-mysql-cdc</name>
+ <packaging>jar</packaging>
+
+ <dependencies>
+
+ <dependency>
+ <groupId>com.ververica</groupId>
+ <artifactId>flink-connector-mysql-cdc</artifactId>
+ <exclusions>
+ <exclusion>
+ <artifactId>flink-connector-debezium</artifactId>
+ <groupId>com.ververica</groupId>
+ </exclusion>
+ <exclusion>
+ <artifactId>debezium-connector-mysql</artifactId>
+ <groupId>io.debezium</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>io.debezium</groupId>
+ <artifactId>debezium-connector-mysql</artifactId>
+ <version>${debezium.connector.mysql.version}</version>
+ <exclusions>
+ <exclusion>
+ <artifactId>debezium-core</artifactId>
+ <groupId>io.debezium</groupId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ </dependencies>
+
+</project>
\ No newline at end of file
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumDeserializationSchema.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumDeserializationSchema.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumDeserializationSchema.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumDeserializationSchema.java
index 93f9562f2..c8e38c989 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumDeserializationSchema.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumDeserializationSchema.java
@@ -16,15 +16,16 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium;
+package org.apache.inlong.sort.cdc.debezium;
import io.debezium.relational.history.TableChanges;
-import java.io.Serializable;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.java.typeutils.ResultTypeQueryable;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.source.SourceRecord;
+import java.io.Serializable;
+
/**
* The deserialization schema describes how to turn the Debezium SourceRecord into data types
* (Java/Scala objects) that are processed by Flink.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumSourceFunction.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumSourceFunction.java
similarity index 90%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumSourceFunction.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumSourceFunction.java
index f620ea543..a7991252a 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/DebeziumSourceFunction.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/DebeziumSourceFunction.java
@@ -16,10 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.registerHistory;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.retrieveHistory;
+package org.apache.inlong.sort.cdc.debezium;
import io.debezium.document.DocumentReader;
import io.debezium.document.DocumentWriter;
@@ -27,18 +24,6 @@ import io.debezium.embedded.Connect;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.spi.OffsetCommitPolicy;
import io.debezium.heartbeat.Heartbeat;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.nio.charset.StandardCharsets;
-import java.util.Collection;
-import java.util.Properties;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import javax.annotation.Nullable;
import org.apache.commons.collections.map.LinkedMap;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.annotation.VisibleForTesting;
@@ -60,18 +45,35 @@ import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.DebeziumChangeConsumer;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.DebeziumChangeFetcher;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.DebeziumOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.DebeziumOffsetSerializer;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.FlinkDatabaseHistory;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.FlinkDatabaseSchemaHistory;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.FlinkOffsetBackingStore;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.Handover;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.SchemaRecord;
+import org.apache.inlong.sort.cdc.debezium.internal.DebeziumChangeConsumer;
+import org.apache.inlong.sort.cdc.debezium.internal.DebeziumChangeFetcher;
+import org.apache.inlong.sort.cdc.debezium.internal.DebeziumOffset;
+import org.apache.inlong.sort.cdc.debezium.internal.DebeziumOffsetSerializer;
+import org.apache.inlong.sort.cdc.debezium.internal.FlinkDatabaseHistory;
+import org.apache.inlong.sort.cdc.debezium.internal.FlinkDatabaseSchemaHistory;
+import org.apache.inlong.sort.cdc.debezium.internal.FlinkOffsetBackingStore;
+import org.apache.inlong.sort.cdc.debezium.internal.Handover;
+import org.apache.inlong.sort.cdc.debezium.internal.SchemaRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.nio.charset.StandardCharsets;
+import java.util.Collection;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.registerHistory;
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.retrieveHistory;
+
+
/**
* The {@link DebeziumSourceFunction} is a streaming data source that pulls captured change data
* from databases into Flink.
@@ -104,54 +106,64 @@ import org.slf4j.LoggerFactory;
public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
implements CheckpointedFunction, CheckpointListener, ResultTypeQueryable<T> {
- private static final long serialVersionUID = -5808108641062931623L;
-
- protected static final Logger LOG = LoggerFactory.getLogger(DebeziumSourceFunction.class);
-
- /** State name of the consumer's partition offset states. */
+ /**
+ * State name of the consumer's partition offset states.
+ */
public static final String OFFSETS_STATE_NAME = "offset-states";
-
- /** State name of the consumer's history records state. */
+ /**
+ * State name of the consumer's history records state.
+ */
public static final String HISTORY_RECORDS_STATE_NAME = "history-records-states";
-
- /** The maximum number of pending non-committed checkpoints to track, to avoid memory leaks. */
+ /**
+ * The maximum number of pending non-committed checkpoints to track, to avoid memory leaks.
+ */
public static final int MAX_NUM_PENDING_CHECKPOINTS = 100;
-
/**
* The configuration represents the Debezium MySQL Connector uses the legacy implementation or
* not.
*/
public static final String LEGACY_IMPLEMENTATION_KEY = "internal.implementation";
-
- /** The configuration value represents legacy implementation. */
+ /**
+ * The configuration value represents legacy implementation.
+ */
public static final String LEGACY_IMPLEMENTATION_VALUE = "legacy";
+ protected static final Logger LOG = LoggerFactory.getLogger(DebeziumSourceFunction.class);
+ private static final long serialVersionUID = -5808108641062931623L;
// ---------------------------------------------------------------------------------------
// Properties
// ---------------------------------------------------------------------------------------
-
- /** The schema to convert from Debezium's messages into Flink's objects. */
+ /**
+ * The schema to convert from Debezium's messages into Flink's objects.
+ */
private final DebeziumDeserializationSchema<T> deserializer;
- /** User-supplied properties for Kafka. * */
+ /**
+ * User-supplied properties for Kafka.
+ */
private final Properties properties;
- /** The specific binlog offset to read from when the first startup. */
+ /**
+ * The specific binlog offset to read from when the first startup.
+ */
private final @Nullable DebeziumOffset specificOffset;
- /** Data for pending but uncommitted offsets. */
+ /**
+ * Data for pending but uncommitted offsets.
+ */
private final LinkedMap pendingOffsetsToCommit = new LinkedMap();
-
- /** Flag indicating whether the Debezium Engine is started. */
- private volatile boolean debeziumStarted = false;
-
- /** Validator to validate the connected database satisfies the cdc connector's requirements. */
+ /**
+ * Validator to validate the connected database satisfies the cdc connector's requirements.
+ */
private final Validator validator;
+ /**
+ * Flag indicating whether the Debezium Engine is started.
+ */
+ private volatile boolean debeziumStarted = false;
// ---------------------------------------------------------------------------------------
// State
// ---------------------------------------------------------------------------------------
-
/**
* The offsets to restore to, if the consumer restores state from a checkpoint.
*
@@ -162,7 +174,9 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
*/
private transient volatile String restoredOffsetState;
- /** Accessor for state in the operator state backend. */
+ /**
+ * Accessor for state in the operator state backend.
+ */
private transient ListState<byte[]> offsetState;
/**
@@ -185,13 +199,19 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
*/
private transient String engineInstanceName;
- /** Consume the events from the engine and commit the offset to the engine. */
+ /**
+ * Consume the events from the engine and commit the offset to the engine.
+ */
private transient DebeziumChangeConsumer changeConsumer;
- /** The consumer to fetch records from {@link Handover}. */
+ /**
+ * The consumer to fetch records from {@link Handover}.
+ */
private transient DebeziumChangeFetcher<T> debeziumChangeFetcher;
- /** Buffer the events from the source and record the errors from the debezium. */
+ /**
+ * Buffer the events from the source and record the errors from the debezium.
+ */
private transient Handover handover;
// ---------------------------------------------------------------------------------------
@@ -508,7 +528,9 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
super.close();
}
- /** Safely and gracefully stop the Debezium engine. */
+ /**
+ * Safely and gracefully stop the Debezium engine.
+ */
private void shutdownEngine() {
try {
if (engine != null) {
@@ -554,8 +576,8 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
} else {
throw new IllegalStateException(
"The configured option 'debezium.internal.implementation' is 'legacy', "
- + "but the state of source is incompatible with this implementation, "
- + "you should remove the the option.");
+ + "but the state of source is incompatible with this implementation, "
+ + "you should remove the the option.");
}
} else if (FlinkDatabaseSchemaHistory.isCompatible(retrieveHistory(engineInstanceName))) {
// tries the non-legacy first
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/JsonDebeziumDeserializationSchema.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/JsonDebeziumDeserializationSchema.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/JsonDebeziumDeserializationSchema.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/JsonDebeziumDeserializationSchema.java
index 1539a3570..9c3ba9847 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/JsonDebeziumDeserializationSchema.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/JsonDebeziumDeserializationSchema.java
@@ -16,11 +16,9 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium;
+package org.apache.inlong.sort.cdc.debezium;
import io.debezium.relational.history.TableChanges.TableChange;
-import java.util.HashMap;
-import java.util.Map;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.util.Collector;
@@ -30,6 +28,9 @@ import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.storage.ConverterConfig;
import org.apache.kafka.connect.storage.ConverterType;
+import java.util.HashMap;
+import java.util.Map;
+
/**
* A JSON format implementation of {@link DebeziumDeserializationSchema} which deserializes the
* received {@link SourceRecord} to JSON String.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/StringDebeziumDeserializationSchema.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/StringDebeziumDeserializationSchema.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/StringDebeziumDeserializationSchema.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/StringDebeziumDeserializationSchema.java
index 1b7931abd..7a128747c 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/StringDebeziumDeserializationSchema.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/StringDebeziumDeserializationSchema.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium;
+package org.apache.inlong.sort.cdc.debezium;
import io.debezium.relational.history.TableChanges.TableChange;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/Validator.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/Validator.java
similarity index 94%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/Validator.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/Validator.java
index f39147673..9dd9766f8 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/Validator.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/Validator.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium;
+package org.apache.inlong.sort.cdc.debezium;
import java.io.Serializable;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/history/FlinkJsonTableChangeSerializer.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/history/FlinkJsonTableChangeSerializer.java
similarity index 99%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/history/FlinkJsonTableChangeSerializer.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/history/FlinkJsonTableChangeSerializer.java
index 856d7fa83..c8a7384ab 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/history/FlinkJsonTableChangeSerializer.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/history/FlinkJsonTableChangeSerializer.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.history;
+package org.apache.inlong.sort.cdc.debezium.history;
import io.debezium.document.Array;
import io.debezium.document.Array.Entry;
@@ -30,6 +30,7 @@ import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges;
import io.debezium.relational.history.TableChanges.TableChange;
import io.debezium.relational.history.TableChanges.TableChangeType;
+
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeConsumer.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeConsumer.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeConsumer.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeConsumer.java
index 53caf857c..734386c88 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeConsumer.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeConsumer.java
@@ -16,19 +16,20 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.DebeziumEngine.RecordCommitter;
-import java.util.List;
-import java.util.Map;
import org.apache.flink.annotation.Internal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.List;
+import java.util.Map;
+
/** Consume debezium change events. */
@Internal
public class DebeziumChangeConsumer
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeFetcher.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeFetcher.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeFetcher.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeFetcher.java
index 9f37164d7..d73ef2888 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumChangeFetcher.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumChangeFetcher.java
@@ -16,27 +16,28 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.connector.SnapshotRecord;
import io.debezium.data.Envelope;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
-import java.util.ArrayDeque;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
import org.apache.commons.collections.CollectionUtils;
import org.apache.flink.annotation.Internal;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.util.Collector;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.ArrayDeque;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+
/**
* A Handler that convert change messages from {@link DebeziumEngine} to data in Flink. Considering
* Debezium in different mode has different strategies to hold the lock, e.g. snapshot, the handler
@@ -59,10 +60,14 @@ public class DebeziumChangeFetcher<T> {
*/
private final Object checkpointLock;
- /** The schema to convert from Debezium's messages into Flink's objects. */
+ /**
+ * The schema to convert from Debezium's messages into Flink's objects.
+ */
private final DebeziumDeserializationSchema<T> deserialization;
- /** A collector to emit records in batch (bundle). */
+ /**
+ * A collector to emit records in batch (bundle).
+ */
private final DebeziumCollector debeziumCollector;
private final DebeziumOffset debeziumOffset;
@@ -70,21 +75,22 @@ public class DebeziumChangeFetcher<T> {
private final DebeziumOffsetSerializer stateSerializer;
private final String heartbeatTopicPrefix;
-
- private boolean isInDbSnapshotPhase;
-
private final Handover handover;
-
+ private boolean isInDbSnapshotPhase;
private volatile boolean isRunning = true;
// ---------------------------------------------------------------------------------------
// Metrics
// ---------------------------------------------------------------------------------------
- /** Timestamp of change event. If the event is a snapshot event, the timestamp is 0L. */
+ /**
+ * Timestamp of change event. If the event is a snapshot event, the timestamp is 0L.
+ */
private volatile long messageTimestamp = 0L;
- /** The last record processing time. */
+ /**
+ * The last record processing time.
+ */
private volatile long processTime = 0L;
/**
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffset.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffset.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffset.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffset.java
index 491c61707..085c45626 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffset.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffset.java
@@ -16,11 +16,12 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
+package org.apache.inlong.sort.cdc.debezium.internal;
+
+import org.apache.flink.annotation.Internal;
import java.io.Serializable;
import java.util.Map;
-import org.apache.flink.annotation.Internal;
/**
* The state that the Flink Debezium Consumer holds for each instance.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffsetSerializer.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffsetSerializer.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffsetSerializer.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffsetSerializer.java
index 9f193a8d9..e624f4b70 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/DebeziumOffsetSerializer.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/DebeziumOffsetSerializer.java
@@ -16,12 +16,13 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
+package org.apache.inlong.sort.cdc.debezium.internal;
-import java.io.IOException;
import org.apache.flink.annotation.Internal;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.IOException;
+
/** Serializer implementation for a {@link DebeziumOffset}. */
@Internal
public class DebeziumOffsetSerializer {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/EmbeddedEngineChangeEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/EmbeddedEngineChangeEvent.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/EmbeddedEngineChangeEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/EmbeddedEngineChangeEvent.java
index b2634f354..43baea8d8 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/EmbeddedEngineChangeEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/EmbeddedEngineChangeEvent.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.RecordChangeEvent;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseHistory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseHistory.java
similarity index 89%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseHistory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseHistory.java
index 44d206f3b..c2f12272e 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseHistory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseHistory.java
@@ -16,11 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.registerHistory;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.removeHistory;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.retrieveHistory;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.config.Configuration;
import io.debezium.relational.history.AbstractDatabaseHistory;
@@ -28,10 +24,15 @@ import io.debezium.relational.history.DatabaseHistoryException;
import io.debezium.relational.history.DatabaseHistoryListener;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.HistoryRecordComparator;
+
import java.util.Collection;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Consumer;
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.registerHistory;
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.removeHistory;
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.retrieveHistory;
+
/**
* Inspired from {@link io.debezium.relational.history.MemoryDatabaseHistory} but we will store the
* HistoryRecords in Flink's state for persistence.
@@ -47,7 +48,24 @@ public class FlinkDatabaseHistory extends AbstractDatabaseHistory {
private ConcurrentLinkedQueue<SchemaRecord> schemaRecords;
private String instanceName;
- /** Gets the registered HistoryRecords under the given instance name. */
+ /**
+ * Determine whether the {@link FlinkDatabaseHistory} is compatible with the specified state.
+ */
+ public static boolean isCompatible(Collection<SchemaRecord> records) {
+ for (SchemaRecord record : records) {
+ // check the source/position/ddl is not null
+ if (!record.isHistoryRecord()) {
+ return false;
+ } else {
+ break;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Gets the registered HistoryRecords under the given instance name.
+ */
private ConcurrentLinkedQueue<SchemaRecord> getRegisteredHistoryRecord(String instanceName) {
Collection<SchemaRecord> historyRecords = retrieveHistory(instanceName);
return new ConcurrentLinkedQueue<>(historyRecords);
@@ -98,19 +116,4 @@ public class FlinkDatabaseHistory extends AbstractDatabaseHistory {
public String toString() {
return "Flink Database History";
}
-
- /**
- * Determine whether the {@link FlinkDatabaseHistory} is compatible with the specified state.
- */
- public static boolean isCompatible(Collection<SchemaRecord> records) {
- for (SchemaRecord record : records) {
- // check the source/position/ddl is not null
- if (!record.isHistoryRecord()) {
- return false;
- } else {
- break;
- }
- }
- return true;
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java
similarity index 93%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java
index f9f8bd943..06142d6e5 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkDatabaseSchemaHistory.java
@@ -16,12 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
-
-import static io.debezium.relational.history.TableChanges.TableChange;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.registerHistory;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.removeHistory;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.DatabaseHistoryUtil.retrieveHistory;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.config.Configuration;
import io.debezium.relational.TableId;
@@ -34,11 +29,17 @@ import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.HistoryRecordComparator;
import io.debezium.relational.history.TableChanges;
import io.debezium.schema.DatabaseSchema;
+import org.apache.inlong.sort.cdc.debezium.history.FlinkJsonTableChangeSerializer;
+
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.history.FlinkJsonTableChangeSerializer;
+
+import static io.debezium.relational.history.TableChanges.TableChange;
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.registerHistory;
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.removeHistory;
+import static org.apache.inlong.sort.cdc.debezium.utils.DatabaseHistoryUtil.retrieveHistory;
/**
* The {@link FlinkDatabaseSchemaHistory} only stores the latest schema of the monitored tables.
@@ -64,6 +65,21 @@ public class FlinkDatabaseSchemaHistory implements DatabaseHistory {
private boolean skipUnparseableDDL;
private boolean useCatalogBeforeSchema;
+ /**
+ * Determine whether the {@link FlinkDatabaseSchemaHistory} is compatible with the specified
+ * state.
+ */
+ public static boolean isCompatible(Collection<SchemaRecord> records) {
+ for (SchemaRecord record : records) {
+ if (!record.isTableChangeRecord()) {
+ return false;
+ } else {
+ break;
+ }
+ }
+ return true;
+ }
+
@Override
public void configure(
Configuration config,
@@ -181,19 +197,4 @@ public class FlinkDatabaseSchemaHistory implements DatabaseHistory {
public boolean skipUnparseableDdlStatements() {
return skipUnparseableDDL;
}
-
- /**
- * Determine whether the {@link FlinkDatabaseSchemaHistory} is compatible with the specified
- * state.
- */
- public static boolean isCompatible(Collection<SchemaRecord> records) {
- for (SchemaRecord record : records) {
- if (!record.isTableChangeRecord()) {
- return false;
- } else {
- break;
- }
- }
- return true;
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkOffsetBackingStore.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkOffsetBackingStore.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkOffsetBackingStore.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkOffsetBackingStore.java
index 838d0e98c..bbd2ef9e3 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/FlinkOffsetBackingStore.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/FlinkOffsetBackingStore.java
@@ -16,10 +16,22 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.embedded.EmbeddedEngine;
import io.debezium.engine.DebeziumEngine;
+import org.apache.inlong.sort.cdc.debezium.DebeziumSourceFunction;
+import org.apache.kafka.common.utils.ThreadUtils;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.json.JsonConverter;
+import org.apache.kafka.connect.runtime.WorkerConfig;
+import org.apache.kafka.connect.storage.Converter;
+import org.apache.kafka.connect.storage.OffsetBackingStore;
+import org.apache.kafka.connect.storage.OffsetStorageWriter;
+import org.apache.kafka.connect.util.Callback;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
@@ -32,17 +44,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumSourceFunction;
-import org.apache.kafka.common.utils.ThreadUtils;
-import org.apache.kafka.connect.errors.ConnectException;
-import org.apache.kafka.connect.json.JsonConverter;
-import org.apache.kafka.connect.runtime.WorkerConfig;
-import org.apache.kafka.connect.storage.Converter;
-import org.apache.kafka.connect.storage.OffsetBackingStore;
-import org.apache.kafka.connect.storage.OffsetStorageWriter;
-import org.apache.kafka.connect.util.Callback;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* A implementation of {@link OffsetBackingStore} backed on Flink's state mechanism.
@@ -56,11 +57,9 @@ import org.slf4j.LoggerFactory;
* @see DebeziumSourceFunction
*/
public class FlinkOffsetBackingStore implements OffsetBackingStore {
- private static final Logger LOG = LoggerFactory.getLogger(FlinkOffsetBackingStore.class);
-
public static final String OFFSET_STATE_VALUE = "offset.storage.flink.state.value";
public static final int FLUSH_TIMEOUT_SECONDS = 10;
-
+ private static final Logger LOG = LoggerFactory.getLogger(FlinkOffsetBackingStore.class);
protected Map<ByteBuffer, ByteBuffer> data = new HashMap<>();
protected ExecutorService executor;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/Handover.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/Handover.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/Handover.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/Handover.java
index 3828cb042..7b45e43b7 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/Handover.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/Handover.java
@@ -16,22 +16,23 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
-
-import static org.apache.flink.util.Preconditions.checkNotNull;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.engine.ChangeEvent;
-import java.io.Closeable;
-import java.util.Collections;
-import java.util.List;
-import javax.annotation.concurrent.GuardedBy;
-import javax.annotation.concurrent.ThreadSafe;
import org.apache.flink.annotation.Internal;
import org.apache.flink.util.ExceptionUtils;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.concurrent.GuardedBy;
+import javax.annotation.concurrent.ThreadSafe;
+import java.io.Closeable;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
/**
* The Handover is a utility to hand over data (a buffer of records) and exception from a
* <i>producer</i> thread to a <i>consumer</i> thread. It effectively behaves like a "size one
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/SchemaRecord.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/SchemaRecord.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/SchemaRecord.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/SchemaRecord.java
index 210d83b25..8022deff4 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/internal/SchemaRecord.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/internal/SchemaRecord.java
@@ -16,14 +16,15 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal;
+package org.apache.inlong.sort.cdc.debezium.internal;
import io.debezium.document.Document;
import io.debezium.document.DocumentWriter;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.TableChanges.TableChange;
-import java.io.IOException;
+
import javax.annotation.Nullable;
+import java.io.IOException;
/**
* The Record represents a schema change event, it contains either one {@link HistoryRecord} or
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/AppendMetadataCollector.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/AppendMetadataCollector.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/AppendMetadataCollector.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/AppendMetadataCollector.java
index 005254f6c..d61292f79 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/AppendMetadataCollector.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/AppendMetadataCollector.java
@@ -16,10 +16,9 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.table;
+package org.apache.inlong.sort.cdc.debezium.table;
import io.debezium.relational.history.TableChanges.TableChange;
-import java.io.Serializable;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
@@ -27,6 +26,8 @@ import org.apache.flink.table.data.utils.JoinedRowData;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.source.SourceRecord;
+import java.io.Serializable;
+
/**
* Emits a row with physical fields and metadata fields.
*/
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DebeziumOptions.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DebeziumOptions.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DebeziumOptions.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DebeziumOptions.java
index c40cfea3e..864ab57c0 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DebeziumOptions.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DebeziumOptions.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.table;
+package org.apache.inlong.sort.cdc.debezium.table;
import java.util.Map;
import java.util.Properties;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverter.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverter.java
similarity index 94%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverter.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverter.java
index 0b4d47029..8c7db32a5 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverter.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverter.java
@@ -16,11 +16,12 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.table;
+package org.apache.inlong.sort.cdc.debezium.table;
-import java.io.Serializable;
import org.apache.kafka.connect.data.Schema;
+import java.io.Serializable;
+
/**
* Runtime converter that converts objects of Debezium into objects of Flink Table & SQL internal
* data structures.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverterFactory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverterFactory.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverterFactory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverterFactory.java
index 3d0861062..e919db491 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/DeserializationRuntimeConverterFactory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/DeserializationRuntimeConverterFactory.java
@@ -16,12 +16,13 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.table;
+package org.apache.inlong.sort.cdc.debezium.table;
+
+import org.apache.flink.table.types.logical.LogicalType;
import java.io.Serializable;
import java.time.ZoneId;
import java.util.Optional;
-import org.apache.flink.table.types.logical.LogicalType;
/**
* Factory to create {@link DeserializationRuntimeConverter} according to {@link LogicalType}. It's
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/MetadataConverter.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/MetadataConverter.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/MetadataConverter.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/MetadataConverter.java
index a63c963ac..bc187ee0c 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/MetadataConverter.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/MetadataConverter.java
@@ -16,15 +16,16 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.table;
+package org.apache.inlong.sort.cdc.debezium.table;
import io.debezium.relational.history.TableChanges;
-import java.io.Serializable;
-import javax.annotation.Nullable;
import org.apache.flink.annotation.Internal;
import org.apache.flink.table.data.RowData;
import org.apache.kafka.connect.source.SourceRecord;
+import javax.annotation.Nullable;
+import java.io.Serializable;
+
/**
* A converter converts {@link SourceRecord} metadata into Flink internal data structures.
*/
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java
index 153748e65..fb29182e3 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/table/RowDataDebeziumDeserializeSchema.java
@@ -16,9 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.table;
-
-import static org.apache.flink.util.Preconditions.checkNotNull;
+package org.apache.inlong.sort.cdc.debezium.table;
import io.debezium.data.Envelope;
import io.debezium.data.SpecialValueDecimal;
@@ -31,20 +29,6 @@ import io.debezium.time.NanoTime;
import io.debezium.time.NanoTimestamp;
import io.debezium.time.Timestamp;
import io.debezium.time.ZonedTimestamp;
-import java.io.Serializable;
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.time.Instant;
-import java.time.LocalDate;
-import java.time.LocalDateTime;
-import java.time.ZoneId;
-import java.time.ZonedDateTime;
-import java.time.format.DateTimeFormatter;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import javax.validation.constraints.NotNull;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.data.DecimalData;
import org.apache.flink.table.data.GenericRowData;
@@ -56,8 +40,8 @@ import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.Collector;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils.TemporalConversions;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.debezium.utils.TemporalConversions;
import org.apache.kafka.connect.data.ConnectSchema;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Field;
@@ -67,6 +51,23 @@ import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.validation.constraints.NotNull;
+import java.io.Serializable;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
/**
* Deserialization schema from Debezium object to Flink Table/SQL internal data structure {@link
* RowData}.
@@ -83,7 +84,7 @@ public final class RowDataDebeziumDeserializeSchema
private static final DateTimeFormatter timeFormatter = DateTimeFormatter.ISO_TIME;
private static final DateTimeFormatter timestampFormatter = DateTimeFormatter.ofPattern(
- "yyyy-MM-dd HH:mm:ss");
+ "yyyy-MM-dd HH:mm:ss");
/**
* TypeInformation of the produced {@link RowData}. *
@@ -123,7 +124,7 @@ public final class RowDataDebeziumDeserializeSchema
ZoneId serverTimeZone,
boolean appendSource,
DeserializationRuntimeConverterFactory userDefinedConverterFactory,
- boolean migrateAll) {
+ boolean migrateAll) {
this.hasMetadata = checkNotNull(metadataConverters).length > 0;
this.appendMetadataCollector = new AppendMetadataCollector(metadataConverters, migrateAll);
this.migrateAll = migrateAll;
@@ -145,103 +146,6 @@ public final class RowDataDebeziumDeserializeSchema
return new Builder();
}
- /**
- * Creates a runtime converter which is null safe.
- */
- private DeserializationRuntimeConverter createConverter(
- LogicalType type,
- ZoneId serverTimeZone,
- DeserializationRuntimeConverterFactory userDefinedConverterFactory) {
- return wrapIntoNullableConverter(
- createNotNullConverter(type, serverTimeZone, userDefinedConverterFactory));
- }
-
- /**
- * Creates a runtime converter which assuming input object is not null.
- */
- public DeserializationRuntimeConverter createNotNullConverter(
- LogicalType type,
- ZoneId serverTimeZone,
- DeserializationRuntimeConverterFactory userDefinedConverterFactory) {
- // user defined converter has a higher resolve order
- Optional<DeserializationRuntimeConverter> converter =
- userDefinedConverterFactory.createUserDefinedConverter(type, serverTimeZone);
- if (converter.isPresent()) {
- return converter.get();
- }
-
- // if no matched user defined converter, fallback to the default converter
- switch (type.getTypeRoot()) {
- case NULL:
- return new DeserializationRuntimeConverter() {
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public Object convert(Object dbzObj, Schema schema) {
- return null;
- }
- };
- case BOOLEAN:
- return convertToBoolean();
- case TINYINT:
- return new DeserializationRuntimeConverter() {
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public Object convert(Object dbzObj, Schema schema) {
- return Byte.parseByte(dbzObj.toString());
- }
- };
- case SMALLINT:
- return new DeserializationRuntimeConverter() {
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public Object convert(Object dbzObj, Schema schema) {
- return Short.parseShort(dbzObj.toString());
- }
- };
- case INTEGER:
- case INTERVAL_YEAR_MONTH:
- return convertToInt();
- case BIGINT:
- case INTERVAL_DAY_TIME:
- return convertToLong();
- case DATE:
- return convertToDate();
- case TIME_WITHOUT_TIME_ZONE:
- return convertToTime();
- case TIMESTAMP_WITHOUT_TIME_ZONE:
- return convertToTimestamp(serverTimeZone);
- case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
- return convertToLocalTimeZoneTimestamp(serverTimeZone);
- case FLOAT:
- return convertToFloat();
- case DOUBLE:
- return convertToDouble();
- case CHAR:
- case VARCHAR:
- return convertToString();
- case BINARY:
- case VARBINARY:
- return convertToBinary();
- case DECIMAL:
- return createDecimalConverter((DecimalType) type);
- case ROW:
- return createRowConverter(
- (RowType) type, serverTimeZone, userDefinedConverterFactory);
- case ARRAY:
- case MAP:
- case MULTISET:
- case RAW:
- default:
- throw new UnsupportedOperationException("Unsupported type: " + type);
- }
- }
-
private static DeserializationRuntimeConverter convertToBoolean() {
return new DeserializationRuntimeConverter() {
@@ -334,10 +238,6 @@ public final class RowDataDebeziumDeserializeSchema
};
}
- // -------------------------------------------------------------------------------------
- // Builder
- // -------------------------------------------------------------------------------------
-
private static DeserializationRuntimeConverter convertToDate() {
return new DeserializationRuntimeConverter() {
@@ -350,10 +250,6 @@ public final class RowDataDebeziumDeserializeSchema
};
}
- // -------------------------------------------------------------------------------------
- // Runtime Converters
- // -------------------------------------------------------------------------------------
-
private static DeserializationRuntimeConverter convertToTime() {
return new DeserializationRuntimeConverter() {
@@ -379,11 +275,9 @@ public final class RowDataDebeziumDeserializeSchema
};
}
- // --------------------------------------------------------------------------------
- // IMPORTANT! We use anonymous classes instead of lambdas for a reason here. It is
- // necessary because the maven shade plugin cannot relocate classes in
- // SerializedLambdas (MSHADE-260).
- // --------------------------------------------------------------------------------
+ // -------------------------------------------------------------------------------------
+ // Builder
+ // -------------------------------------------------------------------------------------
private static DeserializationRuntimeConverter convertToTimestamp(ZoneId serverTimeZone) {
return new DeserializationRuntimeConverter() {
@@ -415,6 +309,10 @@ public final class RowDataDebeziumDeserializeSchema
};
}
+ // -------------------------------------------------------------------------------------
+ // Runtime Converters
+ // -------------------------------------------------------------------------------------
+
private static DeserializationRuntimeConverter convertToLocalTimeZoneTimestamp(
ZoneId serverTimeZone) {
return new DeserializationRuntimeConverter() {
@@ -439,6 +337,12 @@ public final class RowDataDebeziumDeserializeSchema
};
}
+ // --------------------------------------------------------------------------------
+ // IMPORTANT! We use anonymous classes instead of lambdas for a reason here. It is
+ // necessary because the maven shade plugin cannot relocate classes in
+ // SerializedLambdas (MSHADE-260).
+ // --------------------------------------------------------------------------------
+
private static DeserializationRuntimeConverter convertToString() {
return new DeserializationRuntimeConverter() {
@@ -507,6 +411,129 @@ public final class RowDataDebeziumDeserializeSchema
};
}
+ private static Object convertField(
+ DeserializationRuntimeConverter fieldConverter, Object fieldValue, Schema fieldSchema)
+ throws Exception {
+ if (fieldValue == null) {
+ return null;
+ } else {
+ return fieldConverter.convert(fieldValue, fieldSchema);
+ }
+ }
+
+ private static DeserializationRuntimeConverter wrapIntoNullableConverter(
+ DeserializationRuntimeConverter converter) {
+ return new DeserializationRuntimeConverter() {
+
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Object convert(Object dbzObj, Schema schema) throws Exception {
+ if (dbzObj == null) {
+ return null;
+ }
+ return converter.convert(dbzObj, schema);
+ }
+ };
+ }
+
+ /**
+ * Creates a runtime converter which is null safe.
+ */
+ private DeserializationRuntimeConverter createConverter(
+ LogicalType type,
+ ZoneId serverTimeZone,
+ DeserializationRuntimeConverterFactory userDefinedConverterFactory) {
+ return wrapIntoNullableConverter(
+ createNotNullConverter(type, serverTimeZone, userDefinedConverterFactory));
+ }
+
+ /**
+ * Creates a runtime converter which assuming input object is not null.
+ */
+ public DeserializationRuntimeConverter createNotNullConverter(
+ LogicalType type,
+ ZoneId serverTimeZone,
+ DeserializationRuntimeConverterFactory userDefinedConverterFactory) {
+ // user defined converter has a higher resolve order
+ Optional<DeserializationRuntimeConverter> converter =
+ userDefinedConverterFactory.createUserDefinedConverter(type, serverTimeZone);
+ if (converter.isPresent()) {
+ return converter.get();
+ }
+
+ // if no matched user defined converter, fallback to the default converter
+ switch (type.getTypeRoot()) {
+ case NULL:
+ return new DeserializationRuntimeConverter() {
+
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Object convert(Object dbzObj, Schema schema) {
+ return null;
+ }
+ };
+ case BOOLEAN:
+ return convertToBoolean();
+ case TINYINT:
+ return new DeserializationRuntimeConverter() {
+
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Object convert(Object dbzObj, Schema schema) {
+ return Byte.parseByte(dbzObj.toString());
+ }
+ };
+ case SMALLINT:
+ return new DeserializationRuntimeConverter() {
+
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Object convert(Object dbzObj, Schema schema) {
+ return Short.parseShort(dbzObj.toString());
+ }
+ };
+ case INTEGER:
+ case INTERVAL_YEAR_MONTH:
+ return convertToInt();
+ case BIGINT:
+ case INTERVAL_DAY_TIME:
+ return convertToLong();
+ case DATE:
+ return convertToDate();
+ case TIME_WITHOUT_TIME_ZONE:
+ return convertToTime();
+ case TIMESTAMP_WITHOUT_TIME_ZONE:
+ return convertToTimestamp(serverTimeZone);
+ case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
+ return convertToLocalTimeZoneTimestamp(serverTimeZone);
+ case FLOAT:
+ return convertToFloat();
+ case DOUBLE:
+ return convertToDouble();
+ case CHAR:
+ case VARCHAR:
+ return convertToString();
+ case BINARY:
+ case VARBINARY:
+ return convertToBinary();
+ case DECIMAL:
+ return createDecimalConverter((DecimalType) type);
+ case ROW:
+ return createRowConverter(
+ (RowType) type, serverTimeZone, userDefinedConverterFactory);
+ case ARRAY:
+ case MAP:
+ case MULTISET:
+ case RAW:
+ default:
+ throw new UnsupportedOperationException("Unsupported type: " + type);
+ }
+ }
+
private DeserializationRuntimeConverter createRowConverter(
RowType rowType,
ZoneId serverTimeZone,
@@ -542,7 +569,7 @@ public final class RowDataDebeziumDeserializeSchema
Object fieldValue = struct.getWithoutDefault(fieldName);
Schema fieldSchema = schema.field(fieldName).schema();
Object convertedField =
- convertField(fieldConverters[i], fieldValue, fieldSchema);
+ convertField(fieldConverters[i], fieldValue, fieldSchema);
row.setField(i, convertedField);
}
}
@@ -561,7 +588,7 @@ public final class RowDataDebeziumDeserializeSchema
private static final long serialVersionUID = 1L;
@Override
- public Object convert(Object dbzObj, Schema schema) {
+ public Object convert(Object dbzObj, Schema schema) {
ConnectSchema connectSchema = (ConnectSchema) schema;
List<Field> fields = connectSchema.fields();
@@ -592,6 +619,7 @@ public final class RowDataDebeziumDeserializeSchema
/**
* transform debezium time format to database format
+ *
* @param fieldValue
* @param schemaName
* @return
@@ -600,7 +628,7 @@ public final class RowDataDebeziumDeserializeSchema
switch (schemaName) {
case MicroTime.SCHEMA_NAME:
Instant instant = Instant.ofEpochMilli((Long) fieldValue / 1000);
- fieldValue = timeFormatter.format(LocalDateTime.ofInstant(instant,serverTimeZone));
+ fieldValue = timeFormatter.format(LocalDateTime.ofInstant(instant, serverTimeZone));
break;
case Date.SCHEMA_NAME:
fieldValue = dateFormatter.format(LocalDate.ofEpochDay((Integer) fieldValue));
@@ -608,12 +636,12 @@ public final class RowDataDebeziumDeserializeSchema
case ZonedTimestamp.SCHEMA_NAME:
ZonedDateTime zonedDateTime = ZonedDateTime.parse((CharSequence) fieldValue);
fieldValue = timestampFormatter.format(zonedDateTime
- .withZoneSameInstant(serverTimeZone).toLocalDateTime());
+ .withZoneSameInstant(serverTimeZone).toLocalDateTime());
break;
case Timestamp.SCHEMA_NAME:
Instant instantTime = Instant.ofEpochMilli((Long) fieldValue);
fieldValue = timestampFormatter.format(LocalDateTime.ofInstant(instantTime,
- serverTimeZone));
+ serverTimeZone));
break;
default:
LOG.error("parse schema {} error", schemaName);
@@ -621,32 +649,6 @@ public final class RowDataDebeziumDeserializeSchema
return fieldValue;
}
- private static Object convertField(
- DeserializationRuntimeConverter fieldConverter, Object fieldValue, Schema fieldSchema)
- throws Exception {
- if (fieldValue == null) {
- return null;
- } else {
- return fieldConverter.convert(fieldValue, fieldSchema);
- }
- }
-
- private static DeserializationRuntimeConverter wrapIntoNullableConverter(
- DeserializationRuntimeConverter converter) {
- return new DeserializationRuntimeConverter() {
-
- private static final long serialVersionUID = 1L;
-
- @Override
- public Object convert(Object dbzObj, Schema schema) throws Exception {
- if (dbzObj == null) {
- return null;
- }
- return converter.convert(dbzObj, schema);
- }
- };
- }
-
@Override
public void deserialize(SourceRecord record, Collector<RowData> out) throws Exception {
deserialize(record, out, null);
@@ -654,7 +656,7 @@ public final class RowDataDebeziumDeserializeSchema
@Override
public void deserialize(SourceRecord record, Collector<RowData> out,
- TableChange tableSchema)
+ TableChange tableSchema)
throws Exception {
Envelope.Operation op = Envelope.operationFor(record);
Struct value = (Struct) record.value();
@@ -697,7 +699,7 @@ public final class RowDataDebeziumDeserializeSchema
}
private void emit(SourceRecord inRecord, RowData physicalRow,
- TableChange tableChange, Collector<RowData> collector
+ TableChange tableChange, Collector<RowData> collector
) {
if (appendSource) {
physicalRow.setRowKind(RowKind.INSERT);
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/DatabaseHistoryUtil.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/DatabaseHistoryUtil.java
similarity index 86%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/DatabaseHistoryUtil.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/DatabaseHistoryUtil.java
index cbadb1c5f..e7e352bd5 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/DatabaseHistoryUtil.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/DatabaseHistoryUtil.java
@@ -16,39 +16,39 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils;
+package org.apache.inlong.sort.cdc.debezium.utils;
import io.debezium.relational.history.DatabaseHistory;
+import org.apache.inlong.sort.cdc.debezium.internal.SchemaRecord;
+
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumSourceFunction;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.SchemaRecord;
/**
- * Util to safely visit schema history between {@link DatabaseHistory} and {@link
- * DebeziumSourceFunction}.
+ * Util to safely visit schema history between {@link DatabaseHistory} and {@link DebeziumSourceFunction}.
*/
public class DatabaseHistoryUtil {
- private DatabaseHistoryUtil() {
- // do nothing
- }
-
/**
* Structure to maintain the current schema history. The content in {@link SchemaRecord} is up
* to the implementation of the {@link DatabaseHistory}.
*/
private static final Map<String, Collection<SchemaRecord>> HISTORY = new HashMap<>();
-
/**
* The schema history will be clean up once {@link DatabaseHistory#stop()}, the checkpoint
* should fail when this happens.
*/
private static final Map<String, Boolean> HISTORY_CLEANUP_STATUS = new HashMap<>();
- /** Registers history of schema safely. */
+ private DatabaseHistoryUtil() {
+ // do nothing
+ }
+
+ /**
+ * Registers history of schema safely.
+ */
public static void registerHistory(String engineName, Collection<SchemaRecord> engineHistory) {
synchronized (HISTORY) {
HISTORY.put(engineName, engineHistory);
@@ -56,7 +56,9 @@ public class DatabaseHistoryUtil {
}
}
- /** Remove history of schema safely. */
+ /**
+ * Remove history of schema safely.
+ */
public static void removeHistory(String engineName) {
synchronized (HISTORY) {
HISTORY_CLEANUP_STATUS.put(engineName, true);
@@ -76,7 +78,7 @@ public class DatabaseHistoryUtil {
String.format(
"Retrieve schema history failed, the schema records for engine %s has been removed,"
+ " this might because the debezium engine "
- + "has been shutdown due to other errors.",
+ + "has been shutdown due to other errors.",
engineName));
} else {
return HISTORY.getOrDefault(engineName, Collections.emptyList());
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/TemporalConversions.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/TemporalConversions.java
similarity index 99%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/TemporalConversions.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/TemporalConversions.java
index eb8dfc4e8..22ce7444f 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/debezium/utils/TemporalConversions.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/debezium/utils/TemporalConversions.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.debezium.utils;
+package org.apache.inlong.sort.cdc.debezium.utils;
import java.time.Duration;
import java.time.Instant;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlSource.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlSource.java
similarity index 86%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlSource.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlSource.java
index 18fbeaf82..721de08f3 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlSource.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlSource.java
@@ -16,27 +16,28 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql;
-
-import static org.apache.flink.util.Preconditions.checkNotNull;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumSourceFunction.LEGACY_IMPLEMENTATION_KEY;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumSourceFunction.LEGACY_IMPLEMENTATION_VALUE;
+package org.apache.inlong.sort.cdc.mysql;
import io.debezium.connector.mysql.MySqlConnector;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.debezium.DebeziumSourceFunction;
+import org.apache.inlong.sort.cdc.debezium.internal.DebeziumOffset;
+import org.apache.inlong.sort.cdc.mysql.table.StartupOptions;
+
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumSourceFunction;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.internal.DebeziumOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.table.StartupOptions;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.inlong.sort.cdc.debezium.DebeziumSourceFunction.LEGACY_IMPLEMENTATION_KEY;
+import static org.apache.inlong.sort.cdc.debezium.DebeziumSourceFunction.LEGACY_IMPLEMENTATION_VALUE;
/**
* A builder to build a SourceFunction which can read snapshot and continue to consume binlog.
*
- * @deprecated please use {@link org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.MySqlSource} instead
- * which supports more rich features, e.g. parallel reading from historical data. The {@link
- * MySqlSource} will be dropped in the future version.
+ * @deprecated please use {@link org.apache.inlong.sort.cdc.mysql.source.MySqlSource} instead
+ * which supports more rich features, e.g. parallel reading from historical data. The {@link
+ * MySqlSource} will be dropped in the future version.
*/
@Deprecated
public class MySqlSource {
@@ -51,9 +52,9 @@ public class MySqlSource {
* Builder class of {@link MySqlSource}.
*
* @deprecated please use {@link
- * org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.MySqlSource#builder()} instead which supports
- * more rich features, e.g. parallel reading from historical data. The {@link
- * Builder} will be dropped in the future version.
+ * org.apache.inlong.sort.cdc.mysql.source.MySqlSource#builder()} instead which supports
+ * more rich features, e.g. parallel reading from historical data. The {@link
+ * Builder} will be dropped in the future version.
*/
@Deprecated
public static class Builder<T> {
@@ -75,7 +76,9 @@ public class MySqlSource {
return this;
}
- /** Integer port number of the MySQL database server. */
+ /**
+ * Integer port number of the MySQL database server.
+ */
public Builder<T> port(int port) {
this.port = port;
return this;
@@ -102,13 +105,17 @@ public class MySqlSource {
return this;
}
- /** Name of the MySQL database to use when connecting to the MySQL database server. */
+ /**
+ * Name of the MySQL database to use when connecting to the MySQL database server.
+ */
public Builder<T> username(String username) {
this.username = username;
return this;
}
- /** Password to use when connecting to the MySQL database server. */
+ /**
+ * Password to use when connecting to the MySQL database server.
+ */
public Builder<T> password(String password) {
this.password = password;
return this;
@@ -135,7 +142,9 @@ public class MySqlSource {
return this;
}
- /** The Debezium MySQL connector properties. For example, "snapshot.mode". */
+ /**
+ * The Debezium MySQL connector properties. For example, "snapshot.mode".
+ */
public Builder<T> debeziumProperties(Properties properties) {
this.dbzProperties = properties;
return this;
@@ -150,7 +159,9 @@ public class MySqlSource {
return this;
}
- /** Specifies the startup options. */
+ /**
+ * Specifies the startup options.
+ */
public Builder<T> startupOptions(StartupOptions startupOptions) {
this.startupOptions = startupOptions;
return this;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlValidator.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlValidator.java
similarity index 91%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlValidator.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlValidator.java
index 690522526..c251727c4 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/MySqlValidator.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/MySqlValidator.java
@@ -16,22 +16,23 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql;
+package org.apache.inlong.sort.cdc.mysql;
import io.debezium.config.Configuration;
import io.debezium.jdbc.JdbcConnection;
-import java.sql.SQLException;
-import java.util.Arrays;
-import java.util.Properties;
import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.Validator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.debezium.Validator;
+import org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Properties;
+
/**
* The validator for MySql: it only cares about the version of the database is larger than or equal
* to 5.7. It also requires the binlog format in the database is ROW and row image is FULL.
@@ -112,7 +113,9 @@ public class MySqlValidator implements Validator {
}
}
- /** Check whether the binlog format is ROW. */
+ /**
+ * Check whether the binlog format is ROW.
+ */
private void checkBinlogFormat(JdbcConnection connection) throws SQLException {
String mode =
connection
@@ -125,13 +128,15 @@ public class MySqlValidator implements Validator {
String.format(
"The MySQL server is configured with binlog_format %s rather than %s, which is "
+ "required for this connector to work properly. "
- + "Change the MySQL configuration to use a "
+ + "Change the MySQL configuration to use a "
+ "binlog_format=ROW and restart the connector.",
mode, BINLOG_FORMAT_ROW));
}
}
- /** Check whether the binlog row image is FULL. */
+ /**
+ * Check whether the binlog row image is FULL.
+ */
private void checkBinlogRowImage(JdbcConnection connection) throws SQLException {
String rowImage =
connection
@@ -152,7 +157,7 @@ public class MySqlValidator implements Validator {
String.format(
"The MySQL server is configured with binlog_row_image %s rather than %s, which is "
+ "required for this connector to work properly. "
- + "Change the MySQL configuration to use a "
+ + "Change the MySQL configuration to use a "
+ "binlog_row_image=FULL and restart the connector.",
rowImage, BINLOG_FORMAT_IMAGE_FULL));
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/SeekBinlogToTimestampFilter.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/SeekBinlogToTimestampFilter.java
similarity index 94%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/SeekBinlogToTimestampFilter.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/SeekBinlogToTimestampFilter.java
index 4a25c6763..f92fb7bba 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/SeekBinlogToTimestampFilter.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/SeekBinlogToTimestampFilter.java
@@ -16,13 +16,13 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql;
+package org.apache.inlong.sort.cdc.mysql;
import io.debezium.data.Envelope;
import io.debezium.relational.history.TableChanges.TableChange;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.util.Collector;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
@@ -33,9 +33,8 @@ import org.slf4j.LoggerFactory;
* to seek binlog to the specific timestamp.
*/
public class SeekBinlogToTimestampFilter<T> implements DebeziumDeserializationSchema<T> {
- private static final long serialVersionUID = -4450118969976653497L;
protected static final Logger LOG = LoggerFactory.getLogger(SeekBinlogToTimestampFilter.class);
-
+ private static final long serialVersionUID = -4450118969976653497L;
private final long startupTimestampMillis;
private final DebeziumDeserializationSchema<T> serializer;
@@ -81,7 +80,7 @@ public class SeekBinlogToTimestampFilter<T> implements DebeziumDeserializationSc
@Override
public void deserialize(SourceRecord record, Collector<T> out, TableChange tableChange)
- throws Exception {
+ throws Exception {
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/DebeziumUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/DebeziumUtils.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/DebeziumUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/DebeziumUtils.java
index fe1e91314..f5cf6fa83 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/DebeziumUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/DebeziumUtils.java
@@ -16,9 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.TableDiscoveryUtils.listTables;
+package org.apache.inlong.sort.cdc.mysql.debezium;
import com.github.shyiko.mysql.binlog.BinaryLogClient;
import io.debezium.config.Configuration;
@@ -35,16 +33,19 @@ import io.debezium.relational.RelationalTableFilters;
import io.debezium.relational.TableId;
import io.debezium.schema.TopicSelector;
import io.debezium.util.SchemaNameAdjuster;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.connection.JdbcConnectionFactory;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.connection.JdbcConnectionFactory;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static org.apache.inlong.sort.cdc.mysql.source.utils.TableDiscoveryUtils.listTables;
/**
* Utilities related to Debezium.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java
index 8f27825c2..e4041e9a2 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/EmbeddedFlinkDatabaseHistory.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium;
+package org.apache.inlong.sort.cdc.mysql.debezium;
import io.debezium.config.Configuration;
import io.debezium.relational.TableId;
@@ -29,13 +29,13 @@ import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.HistoryRecordComparator;
import io.debezium.relational.history.TableChanges;
import io.debezium.relational.history.TableChanges.TableChange;
+
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplitState;
/**
* A {@link DatabaseHistory} implementation which store the latest table schema in Flink state.
@@ -54,6 +54,18 @@ public class EmbeddedFlinkDatabaseHistory implements DatabaseHistory {
private boolean storeOnlyMonitoredTablesDdl;
private boolean skipUnparseableDDL;
+ public static void registerHistory(String engineName, Collection<TableChange> engineHistory) {
+ TABLE_SCHEMAS.put(engineName, engineHistory);
+ }
+
+ public static Collection<TableChange> removeHistory(String engineName) {
+ if (engineName == null) {
+ return Collections.emptyList();
+ }
+ Collection<TableChange> tableChanges = TABLE_SCHEMAS.remove(engineName);
+ return tableChanges != null ? tableChanges : Collections.emptyList();
+ }
+
@Override
public void configure(
Configuration config,
@@ -62,7 +74,7 @@ public class EmbeddedFlinkDatabaseHistory implements DatabaseHistory {
boolean useCatalogBeforeSchema) {
this.listener = listener;
this.storeOnlyMonitoredTablesDdl = config.getBoolean(
- DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL);
+ DatabaseHistory.STORE_ONLY_MONITORED_TABLES_DDL);
this.skipUnparseableDDL = config.getBoolean(DatabaseHistory.SKIP_UNPARSEABLE_DDL_STATEMENTS);
// recover
@@ -138,16 +150,4 @@ public class EmbeddedFlinkDatabaseHistory implements DatabaseHistory {
public boolean skipUnparseableDdlStatements() {
return skipUnparseableDDL;
}
-
- public static void registerHistory(String engineName, Collection<TableChange> engineHistory) {
- TABLE_SCHEMAS.put(engineName, engineHistory);
- }
-
- public static Collection<TableChange> removeHistory(String engineName) {
- if (engineName == null) {
- return Collections.emptyList();
- }
- Collection<TableChange> tableChanges = TABLE_SCHEMAS.remove(engineName);
- return tableChanges != null ? tableChanges : Collections.emptyList();
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java
similarity index 94%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java
index 05162ee7b..e9841fd97 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/EventDispatcherImpl.java
@@ -16,11 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.BINLOG_FILENAME_OFFSET_KEY;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.BINLOG_POSITION_OFFSET_KEY;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.context.StatefulTaskContext.MySqlEventMetadataProvider.SERVER_ID_KEY;
+package org.apache.inlong.sort.cdc.mysql.debezium.dispatcher;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.connector.base.ChangeEventQueue;
@@ -38,10 +34,6 @@ import io.debezium.schema.HistorizedDatabaseSchema;
import io.debezium.schema.SchemaChangeEvent;
import io.debezium.schema.TopicSelector;
import io.debezium.util.SchemaNameAdjuster;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
@@ -49,6 +41,15 @@ import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.BINLOG_FILENAME_OFFSET_KEY;
+import static org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.BINLOG_POSITION_OFFSET_KEY;
+import static org.apache.inlong.sort.cdc.mysql.debezium.task.context.StatefulTaskContext.MySqlEventMetadataProvider.SERVER_ID_KEY;
+
/**
* A subclass implementation of {@link EventDispatcher}.
*
@@ -60,9 +61,8 @@ import org.slf4j.LoggerFactory;
*/
public class EventDispatcherImpl<T extends DataCollectionId> extends EventDispatcher<T> {
- private static final Logger LOG = LoggerFactory.getLogger(EventDispatcherImpl.class);
-
public static final String HISTORY_RECORD_FIELD = "historyRecord";
+ private static final Logger LOG = LoggerFactory.getLogger(EventDispatcherImpl.class);
private static final DocumentWriter DOCUMENT_WRITER = DocumentWriter.defaultWriter();
private final ChangeEventQueue<DataChangeEvent> queue;
@@ -167,7 +167,9 @@ public class EventDispatcherImpl<T extends DataCollectionId> extends EventDispat
schemaChangeEventEmitter.emitSchemaChangeEvent(new SchemaChangeEventReceiver());
}
- /** A {@link SchemaChangeEventEmitter.Receiver} implementation for {@link SchemaChangeEvent}. */
+ /**
+ * A {@link SchemaChangeEventEmitter.Receiver} implementation for {@link SchemaChangeEvent}.
+ */
private final class SchemaChangeEventReceiver implements SchemaChangeEventEmitter.Receiver {
private Struct schemaChangeRecordKey(SchemaChangeEvent event) {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java
similarity index 94%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java
index 49fa47eb7..1dfdb2588 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/dispatcher/SignalEventDispatcher.java
@@ -16,19 +16,20 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher;
+package org.apache.inlong.sort.cdc.mysql.debezium.dispatcher;
import io.debezium.connector.base.ChangeEventQueue;
import io.debezium.pipeline.DataChangeEvent;
import io.debezium.util.SchemaNameAdjuster;
-import java.util.Map;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
+import java.util.Map;
+
/**
* A dispatcher to dispatch watermark signal events.
*
@@ -37,8 +38,6 @@ import org.apache.kafka.connect.source.SourceRecord;
*/
public class SignalEventDispatcher {
- private static final SchemaNameAdjuster SCHEMA_NAME_ADJUSTER = SchemaNameAdjuster.create();
-
public static final String DATABASE_NAME = "db";
public static final String TABLE_NAME = "table";
public static final String WATERMARK_SIGNAL = "_split_watermark_signal_";
@@ -50,7 +49,7 @@ public class SignalEventDispatcher {
"io.debezium.connector.flink.cdc.embedded.watermark.key";
public static final String SIGNAL_EVENT_VALUE_SCHEMA_NAME =
"io.debezium.connector.flink.cdc.embedded.watermark.value";
-
+ private static final SchemaNameAdjuster SCHEMA_NAME_ADJUSTER = SchemaNameAdjuster.create();
private final Schema signalEventKeySchema;
private final Schema signalEventValueSchema;
private final Map<String, ?> sourcePartition;
@@ -106,7 +105,9 @@ public class SignalEventDispatcher {
return result;
}
- /** The watermark kind. */
+ /**
+ * The watermark kind.
+ */
public enum WatermarkKind {
LOW,
HIGH,
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/BinlogSplitReader.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/BinlogSplitReader.java
similarity index 89%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/BinlogSplitReader.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/BinlogSplitReader.java
index 5944d0ff7..eae13212a 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/BinlogSplitReader.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/BinlogSplitReader.java
@@ -16,12 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getBinlogPosition;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getSplitKey;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getTableId;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.isDataChangeRecord;
+package org.apache.inlong.sort.cdc.mysql.debezium.reader;
import io.debezium.connector.base.ChangeEventQueue;
import io.debezium.connector.mysql.MySqlOffsetContext;
@@ -30,6 +25,22 @@ import io.debezium.pipeline.DataChangeEvent;
import io.debezium.pipeline.source.spi.ChangeEventSource;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables;
+import org.apache.flink.shaded.guava18.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.inlong.sort.cdc.mysql.debezium.task.MySqlBinlogSplitReadTask;
+import org.apache.inlong.sort.cdc.mysql.debezium.task.context.StatefulTaskContext;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.source.utils.ChunkUtils;
+import org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
@@ -38,21 +49,11 @@ import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
-import javax.annotation.Nullable;
-import org.apache.flink.shaded.guava18.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.flink.table.types.logical.RowType;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.MySqlBinlogSplitReadTask;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.context.StatefulTaskContext;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.ChunkUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils;
-import org.apache.kafka.connect.source.SourceRecord;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getBinlogPosition;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getSplitKey;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getTableId;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.isDataChangeRecord;
/**
* A Debezium binlog reader implementation that also support reads binlog and filter overlapping
@@ -110,7 +111,7 @@ public class BinlogSplitReader implements DebeziumReader<SourceRecord, MySqlSpli
() -> {
try {
binlogSplitReadTask.execute(new BinlogSplitChangeEventSourceContextImpl(),
- statefulTaskContext.getOffsetContext());
+ statefulTaskContext.getOffsetContext());
} catch (Exception e) {
currentTaskRunning = false;
LOG.error(
@@ -123,14 +124,6 @@ public class BinlogSplitReader implements DebeziumReader<SourceRecord, MySqlSpli
});
}
- private class BinlogSplitChangeEventSourceContextImpl
- implements ChangeEventSource.ChangeEventSourceContext {
- @Override
- public boolean isRunning() {
- return currentTaskRunning;
- }
- }
-
@Override
public boolean isFinished() {
return currentBinlogSplit == null || !currentTaskRunning;
@@ -212,7 +205,7 @@ public class BinlogSplitReader implements DebeziumReader<SourceRecord, MySqlSpli
statefulTaskContext.getSchemaNameAdjuster());
for (FinishedSnapshotSplitInfo splitInfo : finishedSplitsInfo.get(tableId)) {
if (RecordUtils.splitKeyRangeContains(
- key, splitInfo.getSplitStart(), splitInfo.getSplitEnd())
+ key, splitInfo.getSplitStart(), splitInfo.getSplitEnd())
&& position.isAfter(splitInfo.getHighWatermark())) {
return true;
}
@@ -273,4 +266,12 @@ public class BinlogSplitReader implements DebeziumReader<SourceRecord, MySqlSpli
public void stopBinlogReadTask() {
this.currentTaskRunning = false;
}
+
+ private class BinlogSplitChangeEventSourceContextImpl
+ implements ChangeEventSource.ChangeEventSourceContext {
+ @Override
+ public boolean isRunning() {
+ return currentTaskRunning;
+ }
+ }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/DebeziumReader.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/DebeziumReader.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/DebeziumReader.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/DebeziumReader.java
index e6799bb37..e6dd715f9 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/DebeziumReader.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/DebeziumReader.java
@@ -16,10 +16,10 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader;
+package org.apache.inlong.sort.cdc.mysql.debezium.reader;
-import java.util.Iterator;
import javax.annotation.Nullable;
+import java.util.Iterator;
/** Reader to read split of table, the split is either snapshot split or binlog split. */
public interface DebeziumReader<T, Split> {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/SnapshotSplitReader.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/SnapshotSplitReader.java
similarity index 90%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/SnapshotSplitReader.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/SnapshotSplitReader.java
index f1442049a..93a48c102 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/reader/SnapshotSplitReader.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/reader/SnapshotSplitReader.java
@@ -16,9 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.normalizedSplitRecords;
+package org.apache.inlong.sort.cdc.mysql.debezium.reader;
import io.debezium.config.Configuration;
import io.debezium.connector.base.ChangeEventQueue;
@@ -30,6 +28,22 @@ import io.debezium.pipeline.DataChangeEvent;
import io.debezium.pipeline.source.spi.ChangeEventSource;
import io.debezium.pipeline.spi.SnapshotResult;
import io.debezium.util.SchemaNameAdjuster;
+import org.apache.flink.shaded.guava18.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher;
+import org.apache.inlong.sort.cdc.mysql.debezium.task.MySqlBinlogSplitReadTask;
+import org.apache.inlong.sort.cdc.mysql.debezium.task.MySqlSnapshotSplitReadTask;
+import org.apache.inlong.sort.cdc.mysql.debezium.task.context.StatefulTaskContext;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -37,21 +51,8 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;
-import javax.annotation.Nullable;
-import org.apache.flink.shaded.guava18.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.MySqlBinlogSplitReadTask;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.MySqlSnapshotSplitReadTask;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.context.StatefulTaskContext;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils;
-import org.apache.kafka.connect.source.SourceRecord;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.normalizedSplitRecords;
/**
* A snapshot reader that reads data from Table in split level, the split is assigned by primary key
@@ -62,17 +63,15 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
private static final Logger LOG = LoggerFactory.getLogger(SnapshotSplitReader.class);
private final StatefulTaskContext statefulTaskContext;
private final ExecutorService executor;
-
+ public AtomicBoolean hasNextElement;
+ public AtomicBoolean reachEnd;
private volatile ChangeEventQueue<DataChangeEvent> queue;
private volatile boolean currentTaskRunning;
private volatile Throwable readException;
-
// task to read snapshot for current split
private MySqlSnapshotSplitReadTask splitSnapshotReadTask;
private MySqlSnapshotSplit currentSnapshotSplit;
private SchemaNameAdjuster nameAdjuster;
- public AtomicBoolean hasNextElement;
- public AtomicBoolean reachEnd;
public SnapshotSplitReader(StatefulTaskContext statefulTaskContext, int subtaskId) {
this.statefulTaskContext = statefulTaskContext;
@@ -112,7 +111,7 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
new SnapshotSplitChangeEventSourceContextImpl();
SnapshotResult snapshotResult =
splitSnapshotReadTask.execute(sourceContext,
- statefulTaskContext.getOffsetContext());
+ statefulTaskContext.getOffsetContext());
final MySqlBinlogSplit backfillBinlogSplit =
createBackfillBinlogSplit(sourceContext);
// optimization that skip the binlog read when the low watermark equals high
@@ -132,11 +131,11 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
final MySqlBinlogSplitReadTask backfillBinlogReadTask =
createBackfillBinlogReadTask(backfillBinlogSplit);
final MySqlOffsetContext.Loader loader =
- new MySqlOffsetContext.Loader(
- statefulTaskContext.getConnectorConfig());
+ new MySqlOffsetContext.Loader(
+ statefulTaskContext.getConnectorConfig());
final MySqlOffsetContext mySqlOffsetContext =
- loader.load(
- backfillBinlogSplit.getStartingOffset().getOffset());
+ loader.load(
+ backfillBinlogSplit.getStartingOffset().getOffset());
backfillBinlogReadTask.execute(
new SnapshotBinlogSplitChangeEventSourceContextImpl(), mySqlOffsetContext);
} else {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java
similarity index 82%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java
index c566d5eef..41bfe039b 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlBinlogSplitReadTask.java
@@ -16,10 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset.NO_STOPPING_OFFSET;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getBinlogPosition;
+package org.apache.inlong.sort.cdc.mysql.debezium.task;
import com.github.shyiko.mysql.binlog.event.Event;
import io.debezium.DebeziumException;
@@ -32,14 +29,17 @@ import io.debezium.connector.mysql.MySqlTaskContext;
import io.debezium.pipeline.ErrorHandler;
import io.debezium.relational.TableId;
import io.debezium.util.Clock;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.EventDispatcherImpl;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader.SnapshotSplitReader.SnapshotBinlogSplitChangeEventSourceContextImpl;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit;
+import org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.EventDispatcherImpl;
+import org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher;
+import org.apache.inlong.sort.cdc.mysql.debezium.reader.SnapshotSplitReader;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset.NO_STOPPING_OFFSET;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getBinlogPosition;
+
/**
* Task to read all binlog for table and also supports read bounded (from lowWatermark to
* highWatermark) binlog.
@@ -87,7 +87,7 @@ public class MySqlBinlogSplitReadTask extends MySqlStreamingChangeEventSource {
@Override
public void execute(ChangeEventSourceContext context, MySqlOffsetContext offsetContext)
- throws InterruptedException {
+ throws InterruptedException {
this.context = context;
super.execute(context, offsetContext);
}
@@ -112,7 +112,7 @@ public class MySqlBinlogSplitReadTask extends MySqlStreamingChangeEventSource {
new DebeziumException("Error processing binlog signal event", e));
}
// tell reader the binlog task finished
- ((SnapshotBinlogSplitChangeEventSourceContextImpl) context).finished();
+ ((SnapshotSplitReader.SnapshotBinlogSplitChangeEventSourceContextImpl) context).finished();
}
}
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java
similarity index 82%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java
index 5c3fff8c0..73e9d9966 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/MySqlSnapshotSplitReadTask.java
@@ -16,9 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.currentBinlogOffset;
+package org.apache.inlong.sort.cdc.mysql.debezium.task;
import io.debezium.DebeziumException;
import io.debezium.connector.mysql.MySqlConnection;
@@ -41,6 +39,16 @@ import io.debezium.util.Clock;
import io.debezium.util.ColumnUtils;
import io.debezium.util.Strings;
import io.debezium.util.Threads;
+import org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.EventDispatcherImpl;
+import org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher;
+import org.apache.inlong.sort.cdc.mysql.debezium.reader.SnapshotSplitReader;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.UnsupportedEncodingException;
import java.sql.Blob;
import java.sql.PreparedStatement;
@@ -49,22 +57,19 @@ import java.sql.SQLException;
import java.sql.Types;
import java.time.Duration;
import java.util.Calendar;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.EventDispatcherImpl;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader.SnapshotSplitReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils;
-import org.apache.kafka.connect.errors.ConnectException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-/** Task to read snapshot split of table. */
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.currentBinlogOffset;
+
+/**
+ * Task to read snapshot split of table.
+ */
public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSource<MySqlOffsetContext> {
private static final Logger LOG = LoggerFactory.getLogger(MySqlSnapshotSplitReadTask.class);
- /** Interval for showing a log statement with the progress while scanning a single table. */
+ /**
+ * Interval for showing a log statement with the progress while scanning a single table.
+ */
private static final Duration LOG_INTERVAL = Duration.ofMillis(10_000);
private final MySqlConnectorConfig connectorConfig;
@@ -101,7 +106,7 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
@Override
public SnapshotResult<MySqlOffsetContext> execute(ChangeEventSourceContext context,
- MySqlOffsetContext previousOffset) throws InterruptedException {
+ MySqlOffsetContext previousOffset) throws InterruptedException {
SnapshottingTask snapshottingTask = getSnapshottingTask(previousOffset);
final SnapshotContext ctx;
try {
@@ -121,45 +126,45 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
}
protected SnapshotResult<MySqlOffsetContext> doExecute(
- ChangeEventSourceContext context,
- MySqlOffsetContext previousOffset,
- SnapshotContext<MySqlOffsetContext> snapshotContext,
- SnapshottingTask snapshottingTask)
- throws Exception {
+ ChangeEventSourceContext context,
+ MySqlOffsetContext previousOffset,
+ SnapshotContext<MySqlOffsetContext> snapshotContext,
+ SnapshottingTask snapshottingTask)
+ throws Exception {
final RelationalSnapshotChangeEventSource.RelationalSnapshotContext<MySqlOffsetContext>
- ctx =
- (RelationalSnapshotChangeEventSource.RelationalSnapshotContext<
- MySqlOffsetContext>)
- snapshotContext;
+ ctx =
+ (RelationalSnapshotChangeEventSource.RelationalSnapshotContext<
+ MySqlOffsetContext>)
+ snapshotContext;
ctx.offset = previousOffset;
SignalEventDispatcher signalEventDispatcher =
- new SignalEventDispatcher(
- previousOffset.getPartition(),
- topicSelector.topicNameFor(snapshotSplit.getTableId()),
- dispatcher.getQueue());
+ new SignalEventDispatcher(
+ previousOffset.getPartition(),
+ topicSelector.topicNameFor(snapshotSplit.getTableId()),
+ dispatcher.getQueue());
final BinlogOffset lowWatermark = currentBinlogOffset(jdbcConnection);
LOG.info(
- "Snapshot step 1 - Determining low watermark {} for split {}",
- lowWatermark,
- snapshotSplit);
+ "Snapshot step 1 - Determining low watermark {} for split {}",
+ lowWatermark,
+ snapshotSplit);
((SnapshotSplitReader.SnapshotSplitChangeEventSourceContextImpl) (context))
- .setLowWatermark(lowWatermark);
+ .setLowWatermark(lowWatermark);
signalEventDispatcher.dispatchWatermarkEvent(
- snapshotSplit, lowWatermark, SignalEventDispatcher.WatermarkKind.LOW);
+ snapshotSplit, lowWatermark, SignalEventDispatcher.WatermarkKind.LOW);
LOG.info("Snapshot step 2 - Snapshotting data");
createDataEvents(ctx, snapshotSplit.getTableId());
final BinlogOffset highWatermark = currentBinlogOffset(jdbcConnection);
LOG.info(
- "Snapshot step 3 - Determining high watermark {} for split {}",
- highWatermark,
- snapshotSplit);
+ "Snapshot step 3 - Determining high watermark {} for split {}",
+ highWatermark,
+ snapshotSplit);
signalEventDispatcher.dispatchWatermarkEvent(
- snapshotSplit, highWatermark, SignalEventDispatcher.WatermarkKind.HIGH);
+ snapshotSplit, highWatermark, SignalEventDispatcher.WatermarkKind.HIGH);
((SnapshotSplitReader.SnapshotSplitChangeEventSourceContextImpl) (context))
- .setHighWatermark(highWatermark);
+ .setHighWatermark(highWatermark);
return SnapshotResult.completed(ctx.offset);
}
@@ -175,14 +180,6 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
return new MySqlSnapshotContext();
}
- private static class MySqlSnapshotContext
- extends RelationalSnapshotChangeEventSource.RelationalSnapshotContext {
-
- public MySqlSnapshotContext() throws SQLException {
- super("");
- }
- }
-
private void createDataEvents(
RelationalSnapshotChangeEventSource.RelationalSnapshotContext snapshotContext,
TableId tableId)
@@ -195,7 +192,9 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
snapshotReceiver.completeSnapshot();
}
- /** Dispatches the data change events for the records of a single table. */
+ /**
+ * Dispatches the data change events for the records of a single table.
+ */
private void createDataEventsForTable(
RelationalSnapshotChangeEventSource.RelationalSnapshotContext snapshotContext,
EventDispatcher.SnapshotReceiver snapshotReceiver,
@@ -218,16 +217,16 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
selectSql);
try (PreparedStatement selectStatement =
- StatementUtils.readTableSplitDataStatement(
- jdbcConnection,
- selectSql,
- snapshotSplit.getSplitStart() == null,
- snapshotSplit.getSplitEnd() == null,
- snapshotSplit.getSplitStart(),
- snapshotSplit.getSplitEnd(),
- snapshotSplit.getSplitKeyType().getFieldCount(),
- connectorConfig.getQueryFetchSize());
- ResultSet rs = selectStatement.executeQuery()) {
+ StatementUtils.readTableSplitDataStatement(
+ jdbcConnection,
+ selectSql,
+ snapshotSplit.getSplitStart() == null,
+ snapshotSplit.getSplitEnd() == null,
+ snapshotSplit.getSplitStart(),
+ snapshotSplit.getSplitEnd(),
+ snapshotSplit.getSplitKeyType().getFieldCount(),
+ connectorConfig.getQueryFetchSize());
+ ResultSet rs = selectStatement.executeQuery()) {
ColumnUtils.ColumnArray columnArray = ColumnUtils.toArray(rs, table);
long rows = 0;
@@ -354,7 +353,7 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
try {
return MySqlValueConverters.containsZeroValuesInDatePart(
- (new String(b.getBytes(1, (int) (b.length())), "UTF-8")), column, table)
+ (new String(b.getBytes(1, (int) (b.length())), "UTF-8")), column, table)
? null
: rs.getTimestamp(fieldNo, Calendar.getInstance());
} catch (UnsupportedEncodingException e) {
@@ -362,4 +361,12 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
throw new RuntimeException(e);
}
}
+
+ private static class MySqlSnapshotContext
+ extends RelationalSnapshotChangeEventSource.RelationalSnapshotContext {
+
+ public MySqlSnapshotContext() throws SQLException {
+ super("");
+ }
+ }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlErrorHandler.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlErrorHandler.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlErrorHandler.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlErrorHandler.java
index 7907ea0d3..9365203a0 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlErrorHandler.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlErrorHandler.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.context;
+package org.apache.inlong.sort.cdc.mysql.debezium.task.context;
import io.debezium.DebeziumException;
import io.debezium.connector.base.ChangeEventQueue;
@@ -24,11 +24,12 @@ import io.debezium.connector.mysql.MySqlConnector;
import io.debezium.connector.mysql.MySqlTaskContext;
import io.debezium.pipeline.ErrorHandler;
import io.debezium.relational.TableId;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
/**
* A subclass implementation of {@link ErrorHandler} which filter some {@link DebeziumException}, we
* use this class instead of {@link io.debezium.connector.mysql.MySqlErrorHandler}.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java
index 4cef227b5..e2665cc55 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/MySqlTaskContextImpl.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.context;
+package org.apache.inlong.sort.cdc.mysql.debezium.task.context;
import com.github.shyiko.mysql.binlog.BinaryLogClient;
import io.debezium.connector.mysql.MySqlConnectorConfig;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/StatefulTaskContext.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/StatefulTaskContext.java
similarity index 93%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/StatefulTaskContext.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/StatefulTaskContext.java
index 7b1ae1547..028529b72 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/debezium/task/context/StatefulTaskContext.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/debezium/task/context/StatefulTaskContext.java
@@ -16,9 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.context;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset.BINLOG_FILENAME_OFFSET_KEY;
+package org.apache.inlong.sort.cdc.mysql.debezium.task.context;
import com.github.shyiko.mysql.binlog.BinaryLogClient;
import io.debezium.connector.AbstractSourceInfo;
@@ -43,19 +41,22 @@ import io.debezium.schema.TopicSelector;
import io.debezium.util.Clock;
import io.debezium.util.Collect;
import io.debezium.util.SchemaNameAdjuster;
-import java.time.Instant;
-import java.util.List;
-import java.util.Map;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.EmbeddedFlinkDatabaseHistory;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.EventDispatcherImpl;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils;
+import org.apache.inlong.sort.cdc.mysql.debezium.EmbeddedFlinkDatabaseHistory;
+import org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.EventDispatcherImpl;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
import org.apache.kafka.connect.data.Struct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.time.Instant;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.inlong.sort.cdc.mysql.debezium.task.context.StatefulTaskContext.MySqlEventMetadataProvider.BINLOG_FILENAME_OFFSET_KEY;
+
/**
* A stateful task context that contains entries the debezium mysql connector task required.
*
@@ -96,6 +97,10 @@ public class StatefulTaskContext {
this.connection = connection;
}
+ public static Clock getClock() {
+ return clock;
+ }
+
/**
* configure
*/
@@ -164,7 +169,9 @@ public class StatefulTaskContext {
schema.recover(offset);
}
- /** Loads the connector's persistent offset (if present) via the given loader. */
+ /**
+ * Loads the connector's persistent offset (if present) via the given loader.
+ */
private MySqlOffsetContext loadStartingOffsetState(
OffsetContext.Loader loader, MySqlSplit mySqlSplit) {
BinlogOffset offset =
@@ -210,61 +217,6 @@ public class StatefulTaskContext {
return found;
}
- /** Copied from debezium for accessing here. */
- public static class MySqlEventMetadataProvider implements EventMetadataProvider {
- public static final String SERVER_ID_KEY = "server_id";
-
- public static final String GTID_KEY = "gtid";
- public static final String BINLOG_FILENAME_OFFSET_KEY = "file";
- public static final String BINLOG_POSITION_OFFSET_KEY = "pos";
- public static final String BINLOG_ROW_IN_EVENT_OFFSET_KEY = "row";
- public static final String THREAD_KEY = "thread";
- public static final String QUERY_KEY = "query";
-
- @Override
- public Instant getEventTimestamp(
- DataCollectionId source, OffsetContext offset, Object key, Struct value) {
- if (value == null) {
- return null;
- }
- final Struct sourceInfo = value.getStruct(Envelope.FieldName.SOURCE);
- if (source == null) {
- return null;
- }
- final Long timestamp = sourceInfo.getInt64(AbstractSourceInfo.TIMESTAMP_KEY);
- return timestamp == null ? null : Instant.ofEpochMilli(timestamp);
- }
-
- @Override
- public Map<String, String> getEventSourcePosition(
- DataCollectionId source, OffsetContext offset, Object key, Struct value) {
- if (value == null) {
- return null;
- }
- final Struct sourceInfo = value.getStruct(Envelope.FieldName.SOURCE);
- if (source == null) {
- return null;
- }
- return Collect.hashMapOf(
- BINLOG_FILENAME_OFFSET_KEY,
- sourceInfo.getString(BINLOG_FILENAME_OFFSET_KEY),
- BINLOG_POSITION_OFFSET_KEY,
- Long.toString(sourceInfo.getInt64(BINLOG_POSITION_OFFSET_KEY)),
- BINLOG_ROW_IN_EVENT_OFFSET_KEY,
- Integer.toString(sourceInfo.getInt32(BINLOG_ROW_IN_EVENT_OFFSET_KEY)));
- }
-
- @Override
- public String getTransactionId(
- DataCollectionId source, OffsetContext offset, Object key, Struct value) {
- return ((MySqlOffsetContext) offset).getTransactionId();
- }
- }
-
- public static Clock getClock() {
- return clock;
- }
-
public MySqlSourceConfig getSourceConfig() {
return sourceConfig;
}
@@ -322,4 +274,57 @@ public class StatefulTaskContext {
public SchemaNameAdjuster getSchemaNameAdjuster() {
return schemaNameAdjuster;
}
+
+ /**
+ * Copied from debezium for accessing here.
+ */
+ public static class MySqlEventMetadataProvider implements EventMetadataProvider {
+ public static final String SERVER_ID_KEY = "server_id";
+
+ public static final String GTID_KEY = "gtid";
+ public static final String BINLOG_FILENAME_OFFSET_KEY = "file";
+ public static final String BINLOG_POSITION_OFFSET_KEY = "pos";
+ public static final String BINLOG_ROW_IN_EVENT_OFFSET_KEY = "row";
+ public static final String THREAD_KEY = "thread";
+ public static final String QUERY_KEY = "query";
+
+ @Override
+ public Instant getEventTimestamp(
+ DataCollectionId source, OffsetContext offset, Object key, Struct value) {
+ if (value == null) {
+ return null;
+ }
+ final Struct sourceInfo = value.getStruct(Envelope.FieldName.SOURCE);
+ if (source == null) {
+ return null;
+ }
+ final Long timestamp = sourceInfo.getInt64(AbstractSourceInfo.TIMESTAMP_KEY);
+ return timestamp == null ? null : Instant.ofEpochMilli(timestamp);
+ }
+
+ @Override
+ public Map<String, String> getEventSourcePosition(
+ DataCollectionId source, OffsetContext offset, Object key, Struct value) {
+ if (value == null) {
+ return null;
+ }
+ final Struct sourceInfo = value.getStruct(Envelope.FieldName.SOURCE);
+ if (source == null) {
+ return null;
+ }
+ return Collect.hashMapOf(
+ BINLOG_FILENAME_OFFSET_KEY,
+ sourceInfo.getString(BINLOG_FILENAME_OFFSET_KEY),
+ BINLOG_POSITION_OFFSET_KEY,
+ Long.toString(sourceInfo.getInt64(BINLOG_POSITION_OFFSET_KEY)),
+ BINLOG_ROW_IN_EVENT_OFFSET_KEY,
+ Integer.toString(sourceInfo.getInt32(BINLOG_ROW_IN_EVENT_OFFSET_KEY)));
+ }
+
+ @Override
+ public String getTransactionId(
+ DataCollectionId source, OffsetContext offset, Object key, Struct value) {
+ return ((MySqlOffsetContext) offset).getTransactionId();
+ }
+ }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlFieldDefinition.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlFieldDefinition.java
similarity index 91%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlFieldDefinition.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlFieldDefinition.java
index cff1fa361..79dc72b94 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlFieldDefinition.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlFieldDefinition.java
@@ -16,13 +16,15 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.quote;
+package org.apache.inlong.sort.cdc.mysql.schema;
import org.apache.commons.lang3.StringUtils;
-/** used to generate field definition in ddl with "desc table". */
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.quote;
+
+/**
+ * used to generate field definition in ddl with "desc table".
+ */
class MySqlFieldDefinition {
private String columnName;
private String columnType;
@@ -48,10 +50,6 @@ class MySqlFieldDefinition {
this.columnType = columnType;
}
- public void setNullable(boolean nullable) {
- this.nullable = nullable;
- }
-
public String getDefaultValue() {
return StringUtils.isEmpty(defaultValue) ? "" : "DEFAULT " + defaultValue;
}
@@ -68,6 +66,10 @@ class MySqlFieldDefinition {
return nullable;
}
+ public void setNullable(boolean nullable) {
+ this.nullable = nullable;
+ }
+
public boolean isKey() {
return key;
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlSchema.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlSchema.java
similarity index 94%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlSchema.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlSchema.java
index 1d7fe6df3..fa62c45e9 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlSchema.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlSchema.java
@@ -16,10 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.createMySqlDatabaseSchema;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.quote;
+package org.apache.inlong.sort.cdc.mysql.schema;
import io.debezium.connector.mysql.MySqlConnectorConfig;
import io.debezium.connector.mysql.MySqlDatabaseSchema;
@@ -28,17 +25,23 @@ import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges.TableChange;
import io.debezium.schema.SchemaChangeEvent;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+
import java.sql.SQLException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-/** A component used to get schema by table path. */
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.createMySqlDatabaseSchema;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.quote;
+
+/**
+ * A component used to get schema by table path.
+ */
public class MySqlSchema {
private static final String SHOW_CREATE_TABLE = "SHOW CREATE TABLE ";
private static final String DESC_TABLE = "DESC ";
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTableDefinition.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTableDefinition.java
similarity index 87%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTableDefinition.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTableDefinition.java
index 460291f86..653c3eb63 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTableDefinition.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTableDefinition.java
@@ -16,17 +16,20 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.quote;
+package org.apache.inlong.sort.cdc.mysql.schema;
import io.debezium.relational.TableId;
+import org.apache.flink.util.CollectionUtil;
+import org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils;
+
import java.util.List;
import java.util.stream.Collectors;
-import org.apache.flink.util.CollectionUtil;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils;
-/** used to generate table definition in ddl with "desc table". */
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.quote;
+
+/**
+ * used to generate table definition in ddl with "desc table".
+ */
public class MySqlTableDefinition {
TableId tableId;
List<MySqlFieldDefinition> fieldDefinitions;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTypeUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTypeUtils.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTypeUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTypeUtils.java
index 2c17900b7..ffd6c90fa 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/schema/MySqlTypeUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/schema/MySqlTypeUtils.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema;
+package org.apache.inlong.sort.cdc.mysql.schema;
import io.debezium.relational.Column;
import org.apache.flink.table.api.DataTypes;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSource.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSource.java
similarity index 78%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSource.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSource.java
index b377ac499..125a816ab 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSource.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSource.java
@@ -16,16 +16,10 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.discoverCapturedTables;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.openJdbcConnection;
+package org.apache.inlong.sort.cdc.mysql.source;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.TableId;
-import java.lang.reflect.Method;
-import java.util.List;
-import java.util.function.Supplier;
import org.apache.flink.annotation.Internal;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -41,29 +35,36 @@ import org.apache.flink.connector.base.source.reader.synchronization.FutureCompl
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.MySqlValidator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.MySqlBinlogSplitAssigner;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.MySqlHybridSplitAssigner;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.MySqlSplitAssigner;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.BinlogPendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.HybridPendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.PendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.PendingSplitsStateSerializer;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfigFactory;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.metrics.MySqlSourceReaderMetrics;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlRecordEmitter;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReaderContext;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSplitReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplitSerializer;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.table.StartupMode;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.mysql.MySqlValidator;
+import org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.MySqlBinlogSplitAssigner;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.MySqlHybridSplitAssigner;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.MySqlSplitAssigner;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.BinlogPendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.HybridPendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.PendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.PendingSplitsStateSerializer;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfigFactory;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.metrics.MySqlSourceReaderMetrics;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlRecordEmitter;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReaderContext;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSplitReader;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplitSerializer;
+import org.apache.inlong.sort.cdc.mysql.table.StartupMode;
import org.apache.kafka.connect.source.SourceRecord;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.discoverCapturedTables;
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.openJdbcConnection;
+
/**
* The MySQL CDC Source based on FLIP-27 and Watermark Signal Algorithm which supports parallel
* reading snapshot of table and then continue to capture data change from binlog.
@@ -101,6 +102,13 @@ public class MySqlSource<T>
private final MySqlSourceConfigFactory configFactory;
private final DebeziumDeserializationSchema<T> deserializationSchema;
+ MySqlSource(
+ MySqlSourceConfigFactory configFactory,
+ DebeziumDeserializationSchema<T> deserializationSchema) {
+ this.configFactory = configFactory;
+ this.deserializationSchema = deserializationSchema;
+ }
+
/**
* Get a MySqlParallelSourceBuilder to build a {@link MySqlSource}.
*
@@ -111,13 +119,6 @@ public class MySqlSource<T>
return new MySqlSourceBuilder<>();
}
- MySqlSource(
- MySqlSourceConfigFactory configFactory,
- DebeziumDeserializationSchema<T> deserializationSchema) {
- this.configFactory = configFactory;
- this.deserializationSchema = deserializationSchema;
- }
-
public MySqlSourceConfigFactory getConfigFactory() {
return configFactory;
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSourceBuilder.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSourceBuilder.java
similarity index 86%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSourceBuilder.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSourceBuilder.java
index ea97048c0..722262b6a 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/MySqlSourceBuilder.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/MySqlSourceBuilder.java
@@ -16,16 +16,17 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source;
+package org.apache.inlong.sort.cdc.mysql.source;
-import static org.apache.flink.util.Preconditions.checkNotNull;
+import org.apache.flink.annotation.PublicEvolving;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfigFactory;
+import org.apache.inlong.sort.cdc.mysql.table.StartupOptions;
import java.time.Duration;
import java.util.Properties;
-import org.apache.flink.annotation.PublicEvolving;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfigFactory;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.table.StartupOptions;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* The builder class for {@link MySqlSource} to make it easier for the users to construct a {@link
@@ -58,7 +59,9 @@ public class MySqlSourceBuilder<T> {
return this;
}
- /** Integer port number of the MySQL database server. */
+ /**
+ * Integer port number of the MySQL database server.
+ */
public MySqlSourceBuilder<T> port(int port) {
this.configFactory.port(port);
return this;
@@ -83,13 +86,17 @@ public class MySqlSourceBuilder<T> {
return this;
}
- /** Name of the MySQL database to use when connecting to the MySQL database server. */
+ /**
+ * Name of the MySQL database to use when connecting to the MySQL database server.
+ */
public MySqlSourceBuilder<T> username(String username) {
this.configFactory.username(username);
return this;
}
- /** Password to use when connecting to the MySQL database server. */
+ /**
+ * Password to use when connecting to the MySQL database server.
+ */
public MySqlSourceBuilder<T> password(String password) {
this.configFactory.password(password);
return this;
@@ -155,7 +162,9 @@ public class MySqlSourceBuilder<T> {
return this;
}
- /** The maximum fetch size for per poll when read table snapshot. */
+ /**
+ * The maximum fetch size for per poll when read table snapshot.
+ */
public MySqlSourceBuilder<T> fetchSize(int fetchSize) {
this.configFactory.fetchSize(fetchSize);
return this;
@@ -170,43 +179,57 @@ public class MySqlSourceBuilder<T> {
return this;
}
- /** The max retry times to get connection. */
+ /**
+ * The max retry times to get connection.
+ */
public MySqlSourceBuilder<T> connectMaxRetries(int connectMaxRetries) {
this.configFactory.connectMaxRetries(connectMaxRetries);
return this;
}
- /** The connection pool size. */
+ /**
+ * The connection pool size.
+ */
public MySqlSourceBuilder<T> connectionPoolSize(int connectionPoolSize) {
this.configFactory.connectionPoolSize(connectionPoolSize);
return this;
}
- /** Whether the {@link MySqlSource} should output the schema changes or not. */
+ /**
+ * Whether the {@link MySqlSource} should output the schema changes or not.
+ */
public MySqlSourceBuilder<T> includeSchemaChanges(boolean includeSchemaChanges) {
this.configFactory.includeSchemaChanges(includeSchemaChanges);
return this;
}
- /** Whether the {@link MySqlSource} should scan the newly added tables or not. */
+ /**
+ * Whether the {@link MySqlSource} should scan the newly added tables or not.
+ */
public MySqlSourceBuilder<T> scanNewlyAddedTableEnabled(boolean scanNewlyAddedTableEnabled) {
this.configFactory.scanNewlyAddedTableEnabled(scanNewlyAddedTableEnabled);
return this;
}
- /** Specifies the startup options. */
+ /**
+ * Specifies the startup options.
+ */
public MySqlSourceBuilder<T> startupOptions(StartupOptions startupOptions) {
this.configFactory.startupOptions(startupOptions);
return this;
}
- /** Custom properties that will overwrite the default JDBC connection URL. */
+ /**
+ * Custom properties that will overwrite the default JDBC connection URL.
+ */
public MySqlSourceBuilder<T> jdbcProperties(Properties jdbcProperties) {
this.configFactory.jdbcProperties(jdbcProperties);
return this;
}
- /** The Debezium MySQL connector properties. For example, "snapshot.mode". */
+ /**
+ * The Debezium MySQL connector properties. For example, "snapshot.mode".
+ */
public MySqlSourceBuilder<T> debeziumProperties(Properties properties) {
this.configFactory.debeziumProperties(properties);
return this;
@@ -221,7 +244,9 @@ public class MySqlSourceBuilder<T> {
return this;
}
- /** The interval of heartbeat event. */
+ /**
+ * The interval of the heartbeat event.
+ */
public MySqlSourceBuilder<T> heartbeatInterval(Duration heartbeatInterval) {
this.configFactory.heartbeatInterval(heartbeatInterval);
return this;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/AssignerStatus.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/AssignerStatus.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/AssignerStatus.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/AssignerStatus.java
index a77df359a..c98f77224 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/AssignerStatus.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/AssignerStatus.java
@@ -16,13 +16,13 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners;
-
-import static java.lang.String.format;
+package org.apache.inlong.sort.cdc.mysql.source.assigners;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static java.lang.String.format;
+
/**
* The state of split assigner finite state machine, tips: we use word status instead of word state
* to avoid conflict with Flink state keyword. The assigner finite state machine goes this way.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkRange.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkRange.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkRange.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkRange.java
index 1de32c9a5..f0958f911 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkRange.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkRange.java
@@ -16,12 +16,12 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners;
+package org.apache.inlong.sort.cdc.mysql.source.assigners;
-import static org.apache.flink.util.Preconditions.checkArgument;
-
-import java.util.Objects;
import javax.annotation.Nullable;
+import java.util.Objects;
+
+import static org.apache.flink.util.Preconditions.checkArgument;
/**
* An internal structure describes a chunk range with a chunk start (inclusive) and chunk end
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkSplitter.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkSplitter.java
similarity index 87%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkSplitter.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkSplitter.java
index 6b634aee4..e4a6bd3e4 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/ChunkSplitter.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/ChunkSplitter.java
@@ -16,21 +16,26 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.openJdbcConnection;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.ObjectUtils.doubleCompare;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.queryApproximateRowCnt;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.queryMin;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.queryMinMax;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.queryNextChunkMax;
-import static java.math.BigDecimal.ROUND_CEILING;
+package org.apache.inlong.sort.cdc.mysql.source.assigners;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.Column;
import io.debezium.relational.Table;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges.TableChange;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.LogicalTypeRoot;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.inlong.sort.cdc.mysql.schema.MySqlSchema;
+import org.apache.inlong.sort.cdc.mysql.schema.MySqlTypeUtils;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.inlong.sort.cdc.mysql.source.utils.ChunkUtils;
+import org.apache.inlong.sort.cdc.mysql.source.utils.ObjectUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
@@ -40,18 +45,14 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import org.apache.flink.table.types.DataType;
-import org.apache.flink.table.types.logical.LogicalTypeRoot;
-import org.apache.flink.table.types.logical.RowType;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema.MySqlSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema.MySqlTypeUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.ChunkUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.ObjectUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static java.math.BigDecimal.ROUND_CEILING;
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.openJdbcConnection;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.ObjectUtils.doubleCompare;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.queryApproximateRowCnt;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.queryMin;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.queryMinMax;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.queryNextChunkMax;
/**
* The {@code ChunkSplitter}'s task is to split table into a set of chunks or called splits (i.e.
@@ -69,7 +70,43 @@ class ChunkSplitter {
this.sourceConfig = sourceConfig;
}
- /** Generates all snapshot splits (chunks) for the give table path. */
+ /**
+ * Checks whether split column is evenly distributed across its range.
+ */
+ private static boolean isEvenlySplitColumn(Column splitColumn) {
+ DataType flinkType = MySqlTypeUtils.fromDbzColumn(splitColumn);
+ LogicalTypeRoot typeRoot = flinkType.getLogicalType().getTypeRoot();
+
+ // currently, we only support the optimization that split column with type BIGINT, INT,
+ // DECIMAL
+ return typeRoot == LogicalTypeRoot.BIGINT
+ || typeRoot == LogicalTypeRoot.INTEGER
+ || typeRoot == LogicalTypeRoot.DECIMAL;
+ }
+
+ // --------------------------------------------------------------------------------------------
+ // Utilities
+ // --------------------------------------------------------------------------------------------
+
+ private static String splitId(TableId tableId, int chunkId) {
+ return tableId.toString() + ":" + chunkId;
+ }
+
+ private static void maySleep(int count, TableId tableId) {
+ // sleep 100 ms after every 10 queries to avoid overloading the database
+ if (count % 10 == 0) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ // nothing to do
+ }
+ LOG.info("ChunkSplitter has split {} chunks for table {}", count, tableId);
+ }
+ }
+
+ /**
+ * Generates all snapshot splits (chunks) for the given table path.
+ */
public Collection<MySqlSnapshotSplit> generateSplits(TableId tableId) {
try (JdbcConnection jdbc = openJdbcConnection(sourceConfig)) {
@@ -114,10 +151,6 @@ class ChunkSplitter {
}
}
- // --------------------------------------------------------------------------------------------
- // Utilities
- // --------------------------------------------------------------------------------------------
-
/**
* We can use evenly-sized chunks or unevenly-sized chunks when split table into chunks, using
* evenly-sized chunks which is much efficient, using unevenly-sized chunks which will request
@@ -169,7 +202,7 @@ class ChunkSplitter {
TableId tableId, Object min, Object max, long approximateRowCnt, int chunkSize) {
LOG.info(
"Use evenly-sized chunk optimization for table {}, "
- + "the approximate row count is {}, the chunk size is {}",
+ + "the approximate row count is {}, the chunk size is {}",
tableId,
approximateRowCnt,
chunkSize);
@@ -191,7 +224,11 @@ class ChunkSplitter {
return splits;
}
- /** Split table into unevenly sized chunks by continuously calculating next chunk max value. */
+ // ------------------------------------------------------------------------------------------
+
+ /**
+ * Split table into unevenly sized chunks by continuously calculating next chunk max value.
+ */
private List<ChunkRange> splitUnevenlySizedChunks(
JdbcConnection jdbc,
TableId tableId,
@@ -250,8 +287,8 @@ class ChunkSplitter {
Object chunkStart,
Object chunkEnd) {
// currently, we only support single split column
- Object[] splitStart = chunkStart == null ? null : new Object[] {chunkStart};
- Object[] splitEnd = chunkEnd == null ? null : new Object[] {chunkEnd};
+ Object[] splitStart = chunkStart == null ? null : new Object[]{chunkStart};
+ Object[] splitEnd = chunkEnd == null ? null : new Object[]{chunkEnd};
Map<TableId, TableChange> schema = new HashMap<>();
schema.put(tableId, mySqlSchema.getTableSchema(jdbc, tableId));
return new MySqlSnapshotSplit(
@@ -264,21 +301,9 @@ class ChunkSplitter {
schema);
}
- // ------------------------------------------------------------------------------------------
-
- /** Checks whether split column is evenly distributed across its range. */
- private static boolean isEvenlySplitColumn(Column splitColumn) {
- DataType flinkType = MySqlTypeUtils.fromDbzColumn(splitColumn);
- LogicalTypeRoot typeRoot = flinkType.getLogicalType().getTypeRoot();
-
- // currently, we only support the optimization that split column with type BIGINT, INT,
- // DECIMAL
- return typeRoot == LogicalTypeRoot.BIGINT
- || typeRoot == LogicalTypeRoot.INTEGER
- || typeRoot == LogicalTypeRoot.DECIMAL;
- }
-
- /** Returns the distribution factor of the given table. */
+ /**
+ * Returns the distribution factor of the given table.
+ */
private double calculateDistributionFactor(
TableId tableId, Object min, Object max, long approximateRowCnt) {
@@ -298,7 +323,7 @@ class ChunkSplitter {
subRowCnt.divide(new BigDecimal(approximateRowCnt), 4, ROUND_CEILING).doubleValue();
LOG.info(
"The distribution factor of table {} is {} according to "
- + "the min split key {}, max split key {} and approximate row count {}",
+ + "the min split key {}, max split key {} and approximate row count {}",
tableId,
distributionFactor,
min,
@@ -306,20 +331,4 @@ class ChunkSplitter {
approximateRowCnt);
return distributionFactor;
}
-
- private static String splitId(TableId tableId, int chunkId) {
- return tableId.toString() + ":" + chunkId;
- }
-
- private static void maySleep(int count, TableId tableId) {
- // every 100 queries to sleep 1s
- if (count % 10 == 0) {
- try {
- Thread.sleep(100);
- } catch (InterruptedException e) {
- // nothing to do
- }
- LOG.info("ChunkSplitter has split {} chunks for table {}", count, tableId);
- }
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java
similarity index 84%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java
index acfff0e27..26f1a7f60 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlBinlogSplitAssigner.java
@@ -16,16 +16,27 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners;
-
-import static com.ververica.cdc.connectors.mysql.source.utils.TableDiscoveryUtils.listTables;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.currentBinlogOffset;
+package org.apache.inlong.sort.cdc.mysql.source.assigners;
import io.debezium.connector.mysql.MySqlConnection;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.RelationalTableFilters;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges.TableChange;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils;
+import org.apache.inlong.sort.cdc.mysql.schema.MySqlSchema;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.BinlogPendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.PendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
@@ -34,19 +45,9 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema.MySqlSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.BinlogPendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.PendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static com.ververica.cdc.connectors.mysql.source.utils.TableDiscoveryUtils.listTables;
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.currentBinlogOffset;
/**
* A {@link MySqlSplitAssigner} which only read binlog from current binlog position.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java
similarity index 87%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java
index 712b3a086..7c25995c5 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlHybridSplitAssigner.java
@@ -16,13 +16,20 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isInitialAssigningFinished;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isNewlyAddedAssigningFinished;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isSuspended;
+package org.apache.inlong.sort.cdc.mysql.source.assigners;
import io.debezium.relational.TableId;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.HybridPendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.PendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
@@ -31,16 +38,10 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.HybridPendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.PendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isInitialAssigningFinished;
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isNewlyAddedAssigningFinished;
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isSuspended;
/**
* A {@link MySqlSplitAssigner} that splits tables into small chunk splits based on primary key
@@ -52,10 +53,8 @@ public class MySqlHybridSplitAssigner implements MySqlSplitAssigner {
private static final String BINLOG_SPLIT_ID = "binlog-split";
private final int splitMetaGroupSize;
-
- private boolean isBinlogSplitAssigned;
-
private final MySqlSnapshotSplitAssigner snapshotSplitAssigner;
+ private boolean isBinlogSplitAssigned;
public MySqlHybridSplitAssigner(
MySqlSourceConfig sourceConfig,
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java
similarity index 89%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java
index 65c415d6c..475e10eb7 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSnapshotSplitAssigner.java
@@ -16,15 +16,26 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.discoverCapturedTables;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.openJdbcConnection;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isAssigningFinished;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isSuspended;
+package org.apache.inlong.sort.cdc.mysql.source.assigners;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.TableId;
+import org.apache.flink.shaded.guava18.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.flink.util.Preconditions;
+import org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils;
+import org.apache.inlong.sort.cdc.mysql.schema.MySqlSchema;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.SnapshotPendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
@@ -38,21 +49,11 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.stream.Collectors;
-import javax.annotation.Nullable;
-import org.apache.flink.shaded.guava18.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.flink.util.Preconditions;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema.MySqlSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.SnapshotPendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.discoverCapturedTables;
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.openJdbcConnection;
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isAssigningFinished;
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isSuspended;
/**
* A {@link MySqlSplitAssigner} that splits tables into small chunk splits based on primary key
@@ -79,7 +80,8 @@ public class MySqlSnapshotSplitAssigner implements MySqlSplitAssigner {
private ExecutorService executor;
private Object lock;
- @Nullable private Long checkpointIdToFinish;
+ @Nullable
+ private Long checkpointIdToFinish;
public MySqlSnapshotSplitAssigner(
MySqlSourceConfig sourceConfig,
@@ -139,6 +141,12 @@ public class MySqlSnapshotSplitAssigner implements MySqlSplitAssigner {
this.isTableIdCaseSensitive = isTableIdCaseSensitive;
}
+ private static ChunkSplitter createChunkSplitter(
+ MySqlSourceConfig sourceConfig, boolean isTableIdCaseSensitive) {
+ MySqlSchema mySqlSchema = new MySqlSchema(sourceConfig, isTableIdCaseSensitive);
+ return new ChunkSplitter(mySqlSchema, sourceConfig);
+ }
+
@Override
public void open() {
lock = new Object();
@@ -176,7 +184,7 @@ public class MySqlSnapshotSplitAssigner implements MySqlSplitAssigner {
// start the newly added tables process under binlog reading phase
LOG.info(
"Found newly added tables, start capture "
- + "newly added tables process under binlog reading phase");
+ + "newly added tables process under binlog reading phase");
this.suspend();
}
}
@@ -280,12 +288,12 @@ public class MySqlSnapshotSplitAssigner implements MySqlSplitAssigner {
if (currentParallelism == 1) {
assignerStatus = assignerStatus.onFinish();
LOG.info(
- "Snapshot split assigner received all splits finished "
- + "and the job parallelism is 1, snapshot split assigner is turn into finished status.");
+ "Snapshot split assigner received all splits finished and the job parallelism is 1, "
+ + "snapshot split assigner is turn into finished status.");
} else {
LOG.info(
"Snapshot split assigner received all splits finished, "
- + "waiting for a complete checkpoint to mark the assigner finished.");
+ + "waiting for a complete checkpoint to mark the assigner finished.");
}
}
}
@@ -373,7 +381,9 @@ public class MySqlSnapshotSplitAssigner implements MySqlSplitAssigner {
}
}
- /** Indicates there is no more splits available in this assigner. */
+ /**
+ * Indicates there are no more splits available in this assigner.
+ */
public boolean noMoreSplits() {
return remainingTables.isEmpty() && remainingSplits.isEmpty();
}
@@ -382,12 +392,12 @@ public class MySqlSnapshotSplitAssigner implements MySqlSplitAssigner {
return assignedSplits;
}
+ // -------------------------------------------------------------------------------------------
+
public Map<String, BinlogOffset> getSplitFinishedOffsets() {
return splitFinishedOffsets;
}
- // -------------------------------------------------------------------------------------------
-
/**
* Returns whether all splits are finished which means no more splits and all assigned splits
* are finished.
@@ -395,10 +405,4 @@ public class MySqlSnapshotSplitAssigner implements MySqlSplitAssigner {
private boolean allSplitsFinished() {
return noMoreSplits() && assignedSplits.size() == splitFinishedOffsets.size();
}
-
- private static ChunkSplitter createChunkSplitter(
- MySqlSourceConfig sourceConfig, boolean isTableIdCaseSensitive) {
- MySqlSchema mySqlSchema = new MySqlSchema(sourceConfig, isTableIdCaseSensitive);
- return new ChunkSplitter(mySqlSchema, sourceConfig);
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSplitAssigner.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSplitAssigner.java
similarity index 87%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSplitAssigner.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSplitAssigner.java
index 4253006ff..47f191ee6 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/MySqlSplitAssigner.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/MySqlSplitAssigner.java
@@ -16,17 +16,18 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners;
+package org.apache.inlong.sort.cdc.mysql.source.assigners;
+
+import org.apache.flink.api.common.state.CheckpointListener;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.PendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import org.apache.flink.api.common.state.CheckpointListener;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.PendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
/**
* The {@code MySqlSplitAssigner} is responsible for deciding what split should be processed. It
@@ -99,7 +100,9 @@ public interface MySqlSplitAssigner {
*/
void notifyCheckpointComplete(long checkpointId);
- /** Gets the split assigner status, see {@code AssignerStatus}. */
+ /**
+ * Gets the split assigner status, see {@code AssignerStatus}.
+ */
AssignerStatus getAssignerStatus();
/**
@@ -108,7 +111,9 @@ public interface MySqlSplitAssigner {
*/
void suspend();
- /** Wakes up the assigner under {@link AssignerStatus#SUSPENDED}. */
+ /**
+ * Wakes up the assigner under {@link AssignerStatus#SUSPENDED}.
+ */
void wakeup();
/**
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java
index 4d6ea8828..d78a0e16d 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/BinlogPendingSplitsState.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state;
+package org.apache.inlong.sort.cdc.mysql.source.assigners.state;
import java.util.Objects;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java
index 1a2cabe7e..2b94be546 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/HybridPendingSplitsState.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state;
+package org.apache.inlong.sort.cdc.mysql.source.assigners.state;
import java.util.Objects;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsState.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsState.java
similarity index 93%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsState.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsState.java
index 5dfb8676a..ecbb19c8d 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsState.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsState.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state;
+package org.apache.inlong.sort.cdc.mysql.source.assigners.state;
import javax.annotation.Nullable;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java
index 1a20399f7..bb2bd6246 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/PendingSplitsStateSerializer.java
@@ -16,25 +16,26 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.readBinlogPosition;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.writeBinlogPosition;
+package org.apache.inlong.sort.cdc.mysql.source.assigners.state;
import io.debezium.relational.TableId;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.core.memory.DataInputDeserializer;
+import org.apache.flink.core.memory.DataOutputSerializer;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.flink.core.io.SimpleVersionedSerializer;
-import org.apache.flink.core.memory.DataInputDeserializer;
-import org.apache.flink.core.memory.DataOutputSerializer;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
+
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.readBinlogPosition;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.writeBinlogPosition;
/**
* The {@link SimpleVersionedSerializer Serializer} for the {@link PendingSplitsState} of MySQL CDC
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java
similarity index 86%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java
index 40ff96dd8..4476eea2c 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/assigners/state/SnapshotPendingSplitsState.java
@@ -16,22 +16,25 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state;
+package org.apache.inlong.sort.cdc.mysql.source.assigners.state;
import io.debezium.relational.TableId;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSplitReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-/** A {@link PendingSplitsState} for pending snapshot splits. */
+/**
+ * A {@link PendingSplitsState} for pending snapshot splits.
+ */
public class SnapshotPendingSplitsState extends PendingSplitsState {
- /** The tables in the checkpoint. */
+ /**
+ * The tables in the checkpoint.
+ */
private final List<TableId> remainingTables;
/**
@@ -40,7 +43,9 @@ public class SnapshotPendingSplitsState extends PendingSplitsState {
*/
private final List<TableId> alreadyProcessedTables;
- /** The splits in the checkpoint. */
+ /**
+ * The splits in the checkpoint.
+ */
private final List<MySqlSnapshotSplit> remainingSplits;
/**
@@ -55,13 +60,19 @@ public class SnapshotPendingSplitsState extends PendingSplitsState {
*/
private final Map<String, BinlogOffset> splitFinishedOffsets;
- /** The {@link AssignerStatus} that indicates the snapshot assigner status. */
+ /**
+ * The {@link AssignerStatus} that indicates the snapshot assigner status.
+ */
private final AssignerStatus assignerStatus;
- /** Whether the table identifier is case-sensitive. */
+ /**
+ * Whether the table identifier is case-sensitive.
+ */
private final boolean isTableIdCaseSensitive;
- /** Whether the remaining tables are keep when snapshot state. */
+ /**
+ * Whether the remaining tables are keep when snapshot state.
+ */
private final boolean isRemainingTablesCheckpointed;
public SnapshotPendingSplitsState(
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfig.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfig.java
similarity index 94%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfig.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfig.java
index b8f89e9a9..65ed4d740 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfig.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfig.java
@@ -16,22 +16,25 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config;
-
-import static org.apache.flink.util.Preconditions.checkNotNull;
+package org.apache.inlong.sort.cdc.mysql.source.config;
import io.debezium.config.Configuration;
import io.debezium.connector.mysql.MySqlConnectorConfig;
import io.debezium.relational.RelationalTableFilters;
+import org.apache.inlong.sort.cdc.mysql.source.MySqlSource;
+import org.apache.inlong.sort.cdc.mysql.table.StartupOptions;
+
+import javax.annotation.Nullable;
import java.io.Serializable;
import java.time.Duration;
import java.util.List;
import java.util.Properties;
-import javax.annotation.Nullable;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.MySqlSource;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.table.StartupOptions;
-/** A MySql Source configuration which is used by {@link MySqlSource}. */
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * A MySql Source configuration which is used by {@link MySqlSource}.
+ */
public class MySqlSourceConfig implements Serializable {
private static final long serialVersionUID = 1L;
@@ -41,7 +44,8 @@ public class MySqlSourceConfig implements Serializable {
private final String password;
private final List<String> databaseList;
private final List<String> tableList;
- @Nullable private final ServerIdRange serverIdRange;
+ @Nullable
+ private final ServerIdRange serverIdRange;
private final StartupOptions startupOptions;
private final int splitSize;
private final int splitMetaGroupSize;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfigFactory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfigFactory.java
similarity index 83%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfigFactory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfigFactory.java
index b01e60427..035269224 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceConfigFactory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceConfigFactory.java
@@ -16,19 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config;
+package org.apache.inlong.sort.cdc.mysql.source.config;
-import static org.apache.flink.util.Preconditions.checkNotNull;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CHUNK_META_GROUP_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CONNECTION_POOL_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_MAX_RETRIES;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_TIMEOUT;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.HEARTBEAT_INTERVAL;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_INCREMENTAL_SNAPSHOT_CHUNK_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_SNAPSHOT_FETCH_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SERVER_TIME_ZONE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_LOWER_BOUND;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_UPPER_BOUND;
+import org.apache.flink.annotation.Internal;
+import org.apache.inlong.sort.cdc.mysql.debezium.EmbeddedFlinkDatabaseHistory;
+import org.apache.inlong.sort.cdc.mysql.table.StartupOptions;
import java.io.Serializable;
import java.time.Duration;
@@ -36,12 +28,22 @@ import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
-import org.apache.flink.annotation.Internal;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.EmbeddedFlinkDatabaseHistory;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.MySqlSource;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.table.StartupOptions;
-/** A factory to construct {@link MySqlSourceConfig}. */
+import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CHUNK_META_GROUP_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CONNECTION_POOL_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_MAX_RETRIES;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_TIMEOUT;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.HEARTBEAT_INTERVAL;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_INCREMENTAL_SNAPSHOT_CHUNK_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_SNAPSHOT_FETCH_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SERVER_TIME_ZONE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_LOWER_BOUND;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_UPPER_BOUND;
+
+/**
+ * A factory to construct {@link MySqlSourceConfig}.
+ */
@Internal
public class MySqlSourceConfigFactory implements Serializable {
@@ -77,7 +79,9 @@ public class MySqlSourceConfigFactory implements Serializable {
return this;
}
- /** Integer port number of the MySQL database server. */
+ /**
+ * Integer port number of the MySQL database server.
+ */
public MySqlSourceConfigFactory port(int port) {
this.port = port;
return this;
@@ -104,13 +108,17 @@ public class MySqlSourceConfigFactory implements Serializable {
return this;
}
- /** Name of the MySQL database to use when connecting to the MySQL database server. */
+ /**
+ * Name of the MySQL database to use when connecting to the MySQL database server.
+ */
public MySqlSourceConfigFactory username(String username) {
this.username = username;
return this;
}
- /** Password to use when connecting to the MySQL database server. */
+ /**
+ * Password to use when connecting to the MySQL database server.
+ */
public MySqlSourceConfigFactory password(String password) {
this.password = password;
return this;
@@ -176,7 +184,9 @@ public class MySqlSourceConfigFactory implements Serializable {
return this;
}
- /** The maximum fetch size for per poll when read table snapshot. */
+ /**
+ * The maximum fetch size for per poll when read table snapshot.
+ */
public MySqlSourceConfigFactory fetchSize(int fetchSize) {
this.fetchSize = fetchSize;
return this;
@@ -191,37 +201,49 @@ public class MySqlSourceConfigFactory implements Serializable {
return this;
}
- /** The connection pool size. */
+ /**
+ * The connection pool size.
+ */
public MySqlSourceConfigFactory connectionPoolSize(int connectionPoolSize) {
this.connectionPoolSize = connectionPoolSize;
return this;
}
- /** The max retry times to get connection. */
+ /**
+ * The max retry times to get connection.
+ */
public MySqlSourceConfigFactory connectMaxRetries(int connectMaxRetries) {
this.connectMaxRetries = connectMaxRetries;
return this;
}
- /** Whether the {@link MySqlSource} should output the schema changes or not. */
+ /**
+ * Whether the {@link MySqlSource} should output the schema changes or not.
+ */
public MySqlSourceConfigFactory includeSchemaChanges(boolean includeSchemaChanges) {
this.includeSchemaChanges = includeSchemaChanges;
return this;
}
- /** Whether the {@link MySqlSource} should scan the newly added tables or not. */
+ /**
+ * Whether the {@link MySqlSource} should scan the newly added tables or not.
+ */
public MySqlSourceConfigFactory scanNewlyAddedTableEnabled(boolean scanNewlyAddedTableEnabled) {
this.scanNewlyAddedTableEnabled = scanNewlyAddedTableEnabled;
return this;
}
- /** Custom properties that will overwrite the default JDBC connection URL. */
+ /**
+ * Custom properties that will overwrite the default JDBC connection URL.
+ */
public MySqlSourceConfigFactory jdbcProperties(Properties jdbcProperties) {
this.jdbcProperties = jdbcProperties;
return this;
}
- /** Specifies the startup options. */
+ /**
+ * Specifies the startup options.
+ */
public MySqlSourceConfigFactory startupOptions(StartupOptions startupOptions) {
switch (startupOptions.startupMode) {
case INITIAL:
@@ -240,13 +262,17 @@ public class MySqlSourceConfigFactory implements Serializable {
return this;
}
- /** The Debezium MySQL connector properties. For example, "snapshot.mode". */
+ /**
+ * The Debezium MySQL connector properties. For example, "snapshot.mode".
+ */
public MySqlSourceConfigFactory debeziumProperties(Properties properties) {
this.dbzProperties = properties;
return this;
}
- /** Creates a new {@link MySqlSourceConfig} for the given subtask {@code subtaskId}. */
+ /**
+ * Creates a new {@link MySqlSourceConfig} for the given subtask {@code subtaskId}.
+ */
public MySqlSourceConfig createConfig(int subtaskId) {
Properties props = new Properties();
// hard code server name, because we don't need to distinguish it, docs:
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceOptions.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceOptions.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceOptions.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceOptions.java
index c1011aa5f..8a211cf19 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/MySqlSourceOptions.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/MySqlSourceOptions.java
@@ -16,13 +16,14 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config;
+package org.apache.inlong.sort.cdc.mysql.source.config;
-import java.time.Duration;
import org.apache.flink.annotation.Experimental;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.MySqlSource;
+import org.apache.inlong.sort.cdc.mysql.source.MySqlSource;
+
+import java.time.Duration;
/**
* Configurations for {@link MySqlSource}.
@@ -190,10 +191,10 @@ public class MySqlSourceOptions {
.withDescription("Whether works as append source.");
public static final ConfigOption<Boolean> MIGRATE_ALL =
- ConfigOptions.key("migrate-all")
- .booleanType()
- .defaultValue(false)
- .withDescription("Whether migrate all databases");
+ ConfigOptions.key("migrate-all")
+ .booleanType()
+ .defaultValue(false)
+ .withDescription("Whether migrate all databases");
// ----------------------------------------------------------------------------
// experimental options, won't add them to documentation
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/ServerIdRange.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/ServerIdRange.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/ServerIdRange.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/ServerIdRange.java
index 7caf48c92..4346ef9d2 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/config/ServerIdRange.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/config/ServerIdRange.java
@@ -16,12 +16,12 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config;
+package org.apache.inlong.sort.cdc.mysql.source.config;
-import static org.apache.flink.util.Preconditions.checkArgument;
-
-import java.io.Serializable;
import javax.annotation.Nullable;
+import java.io.Serializable;
+
+import static org.apache.flink.util.Preconditions.checkArgument;
/**
* This class defines a range of server id. The boundaries of the range are inclusive.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPoolId.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPoolId.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPoolId.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPoolId.java
index 947f56c64..ab1e18c3e 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPoolId.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPoolId.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.connection;
+package org.apache.inlong.sort.cdc.mysql.source.connection;
import java.io.Serializable;
import java.util.Objects;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPools.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPools.java
similarity index 83%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPools.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPools.java
index b57e85327..499b47dc5 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/ConnectionPools.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/ConnectionPools.java
@@ -16,13 +16,15 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.connection;
+package org.apache.inlong.sort.cdc.mysql.source.connection;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.flink.annotation.Internal;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
-/** A JDBC connection pools that consists of {@link HikariDataSource}. */
+/**
+ * A JDBC connection pools that consists of {@link HikariDataSource}.
+ */
@Internal
public interface ConnectionPools {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionFactory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionFactory.java
similarity index 92%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionFactory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionFactory.java
index 238218eb5..8eb2dbabc 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionFactory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionFactory.java
@@ -16,19 +16,22 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.connection;
+package org.apache.inlong.sort.cdc.mysql.source.connection;
import com.zaxxer.hikari.HikariDataSource;
import io.debezium.jdbc.JdbcConfiguration;
import io.debezium.jdbc.JdbcConnection;
-import java.sql.Connection;
-import java.sql.SQLException;
import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/** A factory to create JDBC connection for MySQL. */
+import java.sql.Connection;
+import java.sql.SQLException;
+
+/**
+ * A factory to create JDBC connection for MySQL.
+ */
public class JdbcConnectionFactory implements JdbcConnection.ConnectionFactory {
private static final Logger LOG = LoggerFactory.getLogger(JdbcConnectionFactory.class);
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionPools.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionPools.java
similarity index 84%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionPools.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionPools.java
index 47a531540..2897c0066 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/JdbcConnectionPools.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/JdbcConnectionPools.java
@@ -16,18 +16,21 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.connection;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.connection.PooledDataSourceFactory.createPooledDataSource;
+package org.apache.inlong.sort.cdc.mysql.source.connection;
import com.zaxxer.hikari.HikariDataSource;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/** A Jdbc Connection pools implementation. */
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.inlong.sort.cdc.mysql.source.connection.PooledDataSourceFactory.createPooledDataSource;
+
+/**
+ * A Jdbc Connection pools implementation.
+ */
public class JdbcConnectionPools implements ConnectionPools {
private static final Logger LOG = LoggerFactory.getLogger(JdbcConnectionPools.class);
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/PooledDataSourceFactory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/PooledDataSourceFactory.java
similarity index 93%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/PooledDataSourceFactory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/PooledDataSourceFactory.java
index 2606fa93b..c75faba8a 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/connection/PooledDataSourceFactory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/connection/PooledDataSourceFactory.java
@@ -16,15 +16,18 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.connection;
+package org.apache.inlong.sort.cdc.mysql.source.connection;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.debezium.connector.mysql.MySqlConnectorConfig;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+
import java.util.Properties;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-/** A connection pool factory to create pooled DataSource {@link HikariDataSource}. */
+/**
+ * A connection pool factory to create pooled DataSource {@link HikariDataSource}.
+ */
public class PooledDataSourceFactory {
public static final String JDBC_URL_PATTERN =
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java
similarity index 85%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java
index 3925a95d2..62d375367 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/enumerator/MySqlSourceEnumerator.java
@@ -16,46 +16,47 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator;
+package org.apache.inlong.sort.cdc.mysql.source.enumerator;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isAssigning;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isAssigningFinished;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.AssignerStatus.isSuspended;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.TreeSet;
-import java.util.stream.Collectors;
-import javax.annotation.Nullable;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.connector.source.SourceEvent;
import org.apache.flink.api.connector.source.SplitEnumerator;
import org.apache.flink.api.connector.source.SplitEnumeratorContext;
import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.MySqlHybridSplitAssigner;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.MySqlSplitAssigner;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.assigners.state.PendingSplitsState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.BinlogSplitMetaEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.BinlogSplitMetaRequestEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.FinishedSnapshotSplitsAckEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.FinishedSnapshotSplitsReportEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.FinishedSnapshotSplitsRequestEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.LatestFinishedSplitsSizeEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.LatestFinishedSplitsSizeRequestEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.SuspendBinlogReaderAckEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.SuspendBinlogReaderEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.WakeupReaderEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.MySqlHybridSplitAssigner;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.MySqlSplitAssigner;
+import org.apache.inlong.sort.cdc.mysql.source.assigners.state.PendingSplitsState;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.events.BinlogSplitMetaEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.BinlogSplitMetaRequestEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.FinishedSnapshotSplitsAckEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.FinishedSnapshotSplitsReportEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.FinishedSnapshotSplitsRequestEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.LatestFinishedSplitsSizeEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.LatestFinishedSplitsSizeRequestEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.SuspendBinlogReaderAckEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.SuspendBinlogReaderEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.WakeupReaderEvent;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isAssigning;
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isAssigningFinished;
+import static org.apache.inlong.sort.cdc.mysql.source.assigners.AssignerStatus.isSuspended;
+
/**
* A MySQL CDC source enumerator that enumerates receive the split request and assign the split to
* source readers.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaEvent.java
similarity index 81%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaEvent.java
index c492263fd..7c5312d09 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaEvent.java
@@ -16,13 +16,14 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
-import java.util.List;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+
+import java.util.List;
/**
* The {@link SourceEvent} that {@link MySqlSourceEnumerator} sends to {@link MySqlSourceReader} to
@@ -34,7 +35,9 @@ public class BinlogSplitMetaEvent implements SourceEvent {
private final String splitId;
- /** The meta data of binlog split is divided to multiple groups. */
+ /**
+ * The meta data of binlog split is divided to multiple groups.
+ */
private final int metaGroupId;
/**
* The serialized meta data of binlog split, it's serialized/deserialize by {@link
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java
similarity index 85%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java
index 428cbc883..d417e23f6 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/BinlogSplitMetaRequestEvent.java
@@ -16,11 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
/**
* The {@link SourceEvent} that {@link MySqlSourceReader} sends to {@link MySqlSourceEnumerator} to
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java
similarity index 86%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java
index fcc3a51b6..0bb00d1ff 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsAckEvent.java
@@ -16,12 +16,13 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
-import java.util.List;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
+
+import java.util.List;
/**
* The {@link SourceEvent} that {@link MySqlSourceEnumerator} sends to {@link MySqlSourceReader} to
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java
similarity index 82%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java
index 5d4a52b7a..3989919fb 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsReportEvent.java
@@ -16,13 +16,14 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
-import java.util.Map;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
+
+import java.util.Map;
/**
* The {@link SourceEvent} that {@link MySqlSourceReader} sends to {@link MySqlSourceEnumerator} to
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java
similarity index 82%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java
index fb9721ff4..15764c972 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/FinishedSnapshotSplitsRequestEvent.java
@@ -16,11 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
/**
* The {@link SourceEvent} that {@link MySqlSourceEnumerator} sends to {@link MySqlSourceReader} to
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java
similarity index 86%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java
index fe7fb40e7..cfb31cd75 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeEvent.java
@@ -16,11 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
/**
* The {@link SourceEvent} that {@link MySqlSourceEnumerator} sends to {@link MySqlSourceReader} to
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java
similarity index 81%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java
index 3274f0e33..1cf9a463f 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/LatestFinishedSplitsSizeRequestEvent.java
@@ -16,11 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
/**
* The {@link SourceEvent} that {@link MySqlSourceReader} sends to {@link MySqlSourceEnumerator} to
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java
similarity index 81%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java
index fa8f1c9cb..24d58c41b 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderAckEvent.java
@@ -16,11 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
/**
* The {@link SourceEvent} that {@link MySqlSourceReader} sends to {@link MySqlSourceEnumerator} to
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderEvent.java
similarity index 81%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderEvent.java
index 4273546d6..0fcdf0a6c 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/SuspendBinlogReaderEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/SuspendBinlogReaderEvent.java
@@ -16,11 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
/**
* The {@link SourceEvent} that {@link MySqlSourceEnumerator} broadcasts to {@link
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/WakeupReaderEvent.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/WakeupReaderEvent.java
similarity index 82%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/WakeupReaderEvent.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/WakeupReaderEvent.java
index a5a19cb7a..20b017fbe 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/events/WakeupReaderEvent.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/events/WakeupReaderEvent.java
@@ -16,11 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events;
+package org.apache.inlong.sort.cdc.mysql.source.events;
import org.apache.flink.api.connector.source.SourceEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.enumerator.MySqlSourceEnumerator;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
/**
* The {@link SourceEvent} that {@link MySqlSourceEnumerator} sends to {@link MySqlSourceReader} to
@@ -28,13 +28,6 @@ import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSo
*/
public class WakeupReaderEvent implements SourceEvent {
private static final long serialVersionUID = 1L;
-
- /** Wake up target. */
- public enum WakeUpTarget {
- SNAPSHOT_READER,
- BINLOG_READER
- }
-
private WakeUpTarget target;
public WakeupReaderEvent(WakeUpTarget target) {
@@ -44,4 +37,12 @@ public class WakeupReaderEvent implements SourceEvent {
public WakeUpTarget getTarget() {
return target;
}
+
+ /**
+ * Wake up target.
+ */
+ public enum WakeUpTarget {
+ SNAPSHOT_READER,
+ BINLOG_READER
+ }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java
similarity index 91%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java
index 6ad00c812..1551975fd 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/metrics/MySqlSourceReaderMetrics.java
@@ -16,13 +16,15 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.metrics;
+package org.apache.inlong.sort.cdc.mysql.source.metrics;
import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader.MySqlSourceReader;
+import org.apache.inlong.sort.cdc.mysql.source.reader.MySqlSourceReader;
-/** A collection class for handling metrics in {@link MySqlSourceReader}. */
+/**
+ * A collection class for handling metrics in {@link MySqlSourceReader}.
+ */
public class MySqlSourceReaderMetrics {
private final MetricGroup metricGroup;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffset.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffset.java
similarity index 99%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffset.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffset.java
index 87c13f4ff..b8c19b6c8 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffset.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffset.java
@@ -16,16 +16,17 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset;
+package org.apache.inlong.sort.cdc.mysql.source.offset;
import io.debezium.connector.mysql.GtidSet;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.kafka.connect.errors.ConnectException;
+
+import javax.annotation.Nullable;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
-import javax.annotation.Nullable;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.kafka.connect.errors.ConnectException;
/**
* A structure describes a fine grained offset in a binlog event including binlog position and gtid
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffsetSerializer.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffsetSerializer.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffsetSerializer.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffsetSerializer.java
index 37a364aaf..604593b0b 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/offset/BinlogOffsetSerializer.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/offset/BinlogOffsetSerializer.java
@@ -16,13 +16,14 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset;
+package org.apache.inlong.sort.cdc.mysql.source.offset;
-import java.io.IOException;
-import java.util.Map;
import org.apache.flink.annotation.Internal;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.IOException;
+import java.util.Map;
+
/** Serializer implementation for a {@link BinlogOffset}. */
@Internal
public class BinlogOffsetSerializer {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlRecordEmitter.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlRecordEmitter.java
similarity index 85%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlRecordEmitter.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlRecordEmitter.java
index 4d2737f7f..68c561892 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlRecordEmitter.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlRecordEmitter.java
@@ -16,18 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getBinlogPosition;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getFetchTimestamp;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getHistoryRecord;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getMessageTimestamp;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.getWatermark;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.isDataChangeRecord;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.isHeartbeatEvent;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.isHighWatermarkEvent;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.isSchemaChangeEvent;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.isWatermarkEvent;
+package org.apache.inlong.sort.cdc.mysql.source.reader;
import com.ververica.cdc.connectors.mysql.source.utils.RecordUtils;
import io.debezium.data.Envelope;
@@ -36,21 +25,33 @@ import io.debezium.relational.TableId;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.TableChanges;
import io.debezium.relational.history.TableChanges.TableChange;
-import java.util.Map;
import org.apache.flink.api.connector.source.SourceOutput;
import org.apache.flink.connector.base.source.reader.RecordEmitter;
import org.apache.flink.util.Collector;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.history.FlinkJsonTableChangeSerializer;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.metrics.MySqlSourceReaderMetrics;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplitState;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.debezium.history.FlinkJsonTableChangeSerializer;
+import org.apache.inlong.sort.cdc.mysql.source.metrics.MySqlSourceReaderMetrics;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplitState;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.Map;
+
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getBinlogPosition;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getFetchTimestamp;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getHistoryRecord;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getMessageTimestamp;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.getWatermark;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.isDataChangeRecord;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.isHeartbeatEvent;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.isHighWatermarkEvent;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.isSchemaChangeEvent;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.isWatermarkEvent;
+
/**
* The {@link RecordEmitter} implementation for {@link MySqlSourceReader}.
*
@@ -168,7 +169,7 @@ public final class MySqlRecordEmitter<T>
}
private void emitElement(SourceRecord element, SourceOutput<T> output,
- TableChange tableSchema) throws Exception {
+ TableChange tableSchema) throws Exception {
outputCollector.output = output;
debeziumDeserializationSchema.deserialize(element, outputCollector, tableSchema);
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReader.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReader.java
similarity index 83%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReader.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReader.java
index 678754e5a..07c96d542 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReader.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReader.java
@@ -16,25 +16,11 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.WakeupReaderEvent.WakeUpTarget.SNAPSHOT_READER;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit.toNormalBinlogSplit;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit.toSuspendedBinlogSplit;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.ChunkUtils.getNextMetaGroupId;
+package org.apache.inlong.sort.cdc.mysql.source.reader;
import io.debezium.connector.mysql.MySqlConnection;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
import org.apache.flink.api.connector.source.SourceEvent;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.source.reader.RecordEmitter;
@@ -43,35 +29,52 @@ import org.apache.flink.connector.base.source.reader.SingleThreadMultiplexSource
import org.apache.flink.connector.base.source.reader.fetcher.SingleThreadFetcherManager;
import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;
import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.BinlogSplitMetaEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.BinlogSplitMetaRequestEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.FinishedSnapshotSplitsAckEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.FinishedSnapshotSplitsReportEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.FinishedSnapshotSplitsRequestEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.LatestFinishedSplitsSizeEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.LatestFinishedSplitsSizeRequestEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.SuspendBinlogReaderAckEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.SuspendBinlogReaderEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.events.WakeupReaderEvent;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlBinlogSplitState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplitState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplitState;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.TableDiscoveryUtils;
+import org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.events.BinlogSplitMetaEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.BinlogSplitMetaRequestEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.FinishedSnapshotSplitsAckEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.FinishedSnapshotSplitsReportEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.FinishedSnapshotSplitsRequestEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.LatestFinishedSplitsSizeEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.LatestFinishedSplitsSizeRequestEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.SuspendBinlogReaderAckEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.SuspendBinlogReaderEvent;
+import org.apache.inlong.sort.cdc.mysql.source.events.WakeupReaderEvent;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplitState;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplitState;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplitState;
+import org.apache.inlong.sort.cdc.mysql.source.utils.TableDiscoveryUtils;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/** The source reader for MySQL source splits. */
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import static org.apache.inlong.sort.cdc.mysql.source.events.WakeupReaderEvent.WakeUpTarget.SNAPSHOT_READER;
+import static org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit.toNormalBinlogSplit;
+import static org.apache.inlong.sort.cdc.mysql.source.split.MySqlBinlogSplit.toSuspendedBinlogSplit;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.ChunkUtils.getNextMetaGroupId;
+
+/**
+ * The source reader for MySQL source splits.
+ */
public class MySqlSourceReader<T>
extends SingleThreadMultiplexSourceReaderBase<
- SourceRecord, T, MySqlSplit, MySqlSplitState> {
+ SourceRecord, T, MySqlSplit, MySqlSplitState> {
private static final Logger LOG = LoggerFactory.getLogger(MySqlSourceReader.class);
@@ -203,7 +206,7 @@ public class MySqlSourceReader<T>
final String splitId = split.splitId();
if (split.getTableSchemas().isEmpty()) {
try (MySqlConnection jdbc =
- DebeziumUtils.createMySqlConnection(sourceConfig.getDbzConfiguration())) {
+ DebeziumUtils.createMySqlConnection(sourceConfig.getDbzConfiguration())) {
Map<TableId, TableChanges.TableChange> tableSchemas =
TableDiscoveryUtils.discoverCapturedTableSchemas(sourceConfig, jdbc);
LOG.info("The table schema discovery for binlog split {} success", splitId);
@@ -322,7 +325,7 @@ public class MySqlSourceReader<T>
} else {
LOG.warn(
"Received out of oder binlog meta event for split {}, "
- + "the received meta group id is {}, but expected is {}, ignore it",
+ + "the received meta group id is {}, but expected is {}, ignore it",
metadataEvent.getSplitId(),
receivedMetaGroupId,
expectedMetaGroupId);
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReaderContext.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReaderContext.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReaderContext.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReaderContext.java
index d12a20908..a25db9f27 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSourceReaderContext.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSourceReaderContext.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader;
+package org.apache.inlong.sort.cdc.mysql.source.reader;
import org.apache.flink.api.connector.source.SourceReaderContext;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSplitReader.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSplitReader.java
similarity index 83%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSplitReader.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSplitReader.java
index efe280781..1c5334498 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/reader/MySqlSplitReader.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/reader/MySqlSplitReader.java
@@ -16,35 +16,38 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.reader;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.createBinaryClient;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.DebeziumUtils.createMySqlConnection;
+package org.apache.inlong.sort.cdc.mysql.source.reader;
import com.github.shyiko.mysql.binlog.BinaryLogClient;
import io.debezium.connector.mysql.MySqlConnection;
-import java.io.IOException;
-import java.util.ArrayDeque;
-import java.util.Iterator;
-import java.util.Queue;
-import javax.annotation.Nullable;
import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
import org.apache.flink.connector.base.source.reader.splitreader.SplitReader;
import org.apache.flink.connector.base.source.reader.splitreader.SplitsAddition;
import org.apache.flink.connector.base.source.reader.splitreader.SplitsChange;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader.BinlogSplitReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader.DebeziumReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader.SnapshotSplitReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.task.context.StatefulTaskContext;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.MySqlSource;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlRecords;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSplit;
+import org.apache.inlong.sort.cdc.mysql.debezium.reader.BinlogSplitReader;
+import org.apache.inlong.sort.cdc.mysql.debezium.reader.DebeziumReader;
+import org.apache.inlong.sort.cdc.mysql.debezium.reader.SnapshotSplitReader;
+import org.apache.inlong.sort.cdc.mysql.debezium.task.context.StatefulTaskContext;
+import org.apache.inlong.sort.cdc.mysql.source.MySqlSource;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlRecords;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSplit;
import org.apache.kafka.connect.source.SourceRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/** The {@link SplitReader} implementation for the {@link MySqlSource}. */
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Iterator;
+import java.util.Queue;
+
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.createBinaryClient;
+import static org.apache.inlong.sort.cdc.mysql.debezium.DebeziumUtils.createMySqlConnection;
+
+/**
+ * The {@link SplitReader} implementation for the {@link MySqlSource}.
+ */
public class MySqlSplitReader implements SplitReader<SourceRecord, MySqlSplit> {
private static final Logger LOG = LoggerFactory.getLogger(MySqlSplitReader.class);
@@ -53,8 +56,10 @@ public class MySqlSplitReader implements SplitReader<SourceRecord, MySqlSplit> {
private final int subtaskId;
private final MySqlSourceReaderContext context;
- @Nullable private DebeziumReader<SourceRecord, MySqlSplit> currentReader;
- @Nullable private String currentSplitId;
+ @Nullable
+ private DebeziumReader<SourceRecord, MySqlSplit> currentReader;
+ @Nullable
+ private String currentSplitId;
public MySqlSplitReader(
MySqlSourceConfig sourceConfig, int subtaskId, MySqlSourceReaderContext context) {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java
similarity index 88%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java
index 1de7158cd..6ddbbc103 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/FinishedSnapshotSplitInfo.java
@@ -16,23 +16,26 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.readBinlogPosition;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.rowToSerializedString;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.serializedStringToRow;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.writeBinlogPosition;
+package org.apache.inlong.sort.cdc.mysql.source.split;
import io.debezium.relational.TableId;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Objects;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
-/** The information used to describe a finished snapshot split. */
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Objects;
+
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.readBinlogPosition;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.rowToSerializedString;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.serializedStringToRow;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.writeBinlogPosition;
+
+/**
+ * The information used to describe a finished snapshot split.
+ */
public class FinishedSnapshotSplitInfo {
private static final ThreadLocal<DataOutputSerializer> SERIALIZER_CACHE =
@@ -57,6 +60,42 @@ public class FinishedSnapshotSplitInfo {
this.highWatermark = highWatermark;
}
+ // ------------------------------------------------------------------------------------
+ // Utils to serialize/deserialize for transmission between Enumerator and SourceReader
+ // ------------------------------------------------------------------------------------
+ public static byte[] serialize(FinishedSnapshotSplitInfo splitInfo) {
+ try {
+ final DataOutputSerializer out = SERIALIZER_CACHE.get();
+ out.writeUTF(splitInfo.getTableId().toString());
+ out.writeUTF(splitInfo.getSplitId());
+ out.writeUTF(rowToSerializedString(splitInfo.getSplitStart()));
+ out.writeUTF(rowToSerializedString(splitInfo.getSplitEnd()));
+ writeBinlogPosition(splitInfo.getHighWatermark(), out);
+ final byte[] result = out.getCopyOfBuffer();
+ out.clear();
+ return result;
+ } catch (IOException e) {
+ throw new FlinkRuntimeException(e);
+ }
+ }
+
+ public static FinishedSnapshotSplitInfo deserialize(byte[] serialized) {
+ try {
+ final DataInputDeserializer in = new DataInputDeserializer(serialized);
+ TableId tableId = TableId.parse(in.readUTF());
+ String splitId = in.readUTF();
+ Object[] splitStart = serializedStringToRow(in.readUTF());
+ Object[] splitEnd = serializedStringToRow(in.readUTF());
+ BinlogOffset highWatermark = readBinlogPosition(in);
+ in.releaseArrays();
+ return new FinishedSnapshotSplitInfo(
+ tableId, splitId, splitStart, splitEnd, highWatermark);
+
+ } catch (IOException e) {
+ throw new FlinkRuntimeException(e);
+ }
+ }
+
public TableId getTableId() {
return tableId;
}
@@ -117,40 +156,4 @@ public class FinishedSnapshotSplitInfo {
+ highWatermark
+ '}';
}
-
- // ------------------------------------------------------------------------------------
- // Utils to serialize/deserialize for transmission between Enumerator and SourceReader
- // ------------------------------------------------------------------------------------
- public static byte[] serialize(FinishedSnapshotSplitInfo splitInfo) {
- try {
- final DataOutputSerializer out = SERIALIZER_CACHE.get();
- out.writeUTF(splitInfo.getTableId().toString());
- out.writeUTF(splitInfo.getSplitId());
- out.writeUTF(rowToSerializedString(splitInfo.getSplitStart()));
- out.writeUTF(rowToSerializedString(splitInfo.getSplitEnd()));
- writeBinlogPosition(splitInfo.getHighWatermark(), out);
- final byte[] result = out.getCopyOfBuffer();
- out.clear();
- return result;
- } catch (IOException e) {
- throw new FlinkRuntimeException(e);
- }
- }
-
- public static FinishedSnapshotSplitInfo deserialize(byte[] serialized) {
- try {
- final DataInputDeserializer in = new DataInputDeserializer(serialized);
- TableId tableId = TableId.parse(in.readUTF());
- String splitId = in.readUTF();
- Object[] splitStart = serializedStringToRow(in.readUTF());
- Object[] splitEnd = serializedStringToRow(in.readUTF());
- BinlogOffset highWatermark = readBinlogPosition(in);
- in.releaseArrays();
- return new FinishedSnapshotSplitInfo(
- tableId, splitId, splitStart, splitEnd, highWatermark);
-
- } catch (IOException e) {
- throw new FlinkRuntimeException(e);
- }
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplit.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplit.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplit.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplit.java
index 818e88a6b..c14785369 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplit.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplit.java
@@ -16,19 +16,22 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
+package org.apache.inlong.sort.cdc.mysql.source.split;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges.TableChange;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+
+import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import javax.annotation.Nullable;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-/** The split to describe the binlog of MySql table(s). */
+/**
+ * The split to describe the binlog of MySql table(s).
+ */
public class MySqlBinlogSplit extends MySqlSplit {
private final BinlogOffset startingOffset;
@@ -37,7 +40,8 @@ public class MySqlBinlogSplit extends MySqlSplit {
private final Map<TableId, TableChange> tableSchemas;
private final int totalFinishedSplitSize;
private final boolean isSuspended;
- @Nullable transient byte[] serializedFormCache;
+ @Nullable
+ transient byte[] serializedFormCache;
public MySqlBinlogSplit(
String splitId,
@@ -72,6 +76,58 @@ public class MySqlBinlogSplit extends MySqlSplit {
this.isSuspended = false;
}
+ // -------------------------------------------------------------------
+ // factory utils to build new MySqlBinlogSplit instance
+ // -------------------------------------------------------------------
+ public static MySqlBinlogSplit appendFinishedSplitInfos(
+ MySqlBinlogSplit binlogSplit, List<FinishedSnapshotSplitInfo> splitInfos) {
+ splitInfos.addAll(binlogSplit.getFinishedSnapshotSplitInfos());
+ return new MySqlBinlogSplit(
+ binlogSplit.splitId,
+ binlogSplit.getStartingOffset(),
+ binlogSplit.getEndingOffset(),
+ splitInfos,
+ binlogSplit.getTableSchemas(),
+ binlogSplit.getTotalFinishedSplitSize(),
+ binlogSplit.isSuspended());
+ }
+
+ public static MySqlBinlogSplit fillTableSchemas(
+ MySqlBinlogSplit binlogSplit, Map<TableId, TableChange> tableSchemas) {
+ tableSchemas.putAll(binlogSplit.getTableSchemas());
+ return new MySqlBinlogSplit(
+ binlogSplit.splitId,
+ binlogSplit.getStartingOffset(),
+ binlogSplit.getEndingOffset(),
+ binlogSplit.getFinishedSnapshotSplitInfos(),
+ tableSchemas,
+ binlogSplit.getTotalFinishedSplitSize(),
+ binlogSplit.isSuspended());
+ }
+
+ public static MySqlBinlogSplit toNormalBinlogSplit(
+ MySqlBinlogSplit suspendedBinlogSplit, int totalFinishedSplitSize) {
+ return new MySqlBinlogSplit(
+ suspendedBinlogSplit.splitId,
+ suspendedBinlogSplit.getStartingOffset(),
+ suspendedBinlogSplit.getEndingOffset(),
+ suspendedBinlogSplit.getFinishedSnapshotSplitInfos(),
+ suspendedBinlogSplit.getTableSchemas(),
+ totalFinishedSplitSize,
+ false);
+ }
+
+ public static MySqlBinlogSplit toSuspendedBinlogSplit(MySqlBinlogSplit normalBinlogSplit) {
+ return new MySqlBinlogSplit(
+ normalBinlogSplit.splitId,
+ normalBinlogSplit.getStartingOffset(),
+ normalBinlogSplit.getEndingOffset(),
+ new ArrayList<>(),
+ new HashMap<>(),
+ normalBinlogSplit.getTotalFinishedSplitSize(),
+ true);
+ }
+
public BinlogOffset getStartingOffset() {
return startingOffset;
}
@@ -147,56 +203,4 @@ public class MySqlBinlogSplit extends MySqlSplit {
+ isSuspended
+ '}';
}
-
- // -------------------------------------------------------------------
- // factory utils to build new MySqlBinlogSplit instance
- // -------------------------------------------------------------------
- public static MySqlBinlogSplit appendFinishedSplitInfos(
- MySqlBinlogSplit binlogSplit, List<FinishedSnapshotSplitInfo> splitInfos) {
- splitInfos.addAll(binlogSplit.getFinishedSnapshotSplitInfos());
- return new MySqlBinlogSplit(
- binlogSplit.splitId,
- binlogSplit.getStartingOffset(),
- binlogSplit.getEndingOffset(),
- splitInfos,
- binlogSplit.getTableSchemas(),
- binlogSplit.getTotalFinishedSplitSize(),
- binlogSplit.isSuspended());
- }
-
- public static MySqlBinlogSplit fillTableSchemas(
- MySqlBinlogSplit binlogSplit, Map<TableId, TableChange> tableSchemas) {
- tableSchemas.putAll(binlogSplit.getTableSchemas());
- return new MySqlBinlogSplit(
- binlogSplit.splitId,
- binlogSplit.getStartingOffset(),
- binlogSplit.getEndingOffset(),
- binlogSplit.getFinishedSnapshotSplitInfos(),
- tableSchemas,
- binlogSplit.getTotalFinishedSplitSize(),
- binlogSplit.isSuspended());
- }
-
- public static MySqlBinlogSplit toNormalBinlogSplit(
- MySqlBinlogSplit suspendedBinlogSplit, int totalFinishedSplitSize) {
- return new MySqlBinlogSplit(
- suspendedBinlogSplit.splitId,
- suspendedBinlogSplit.getStartingOffset(),
- suspendedBinlogSplit.getEndingOffset(),
- suspendedBinlogSplit.getFinishedSnapshotSplitInfos(),
- suspendedBinlogSplit.getTableSchemas(),
- totalFinishedSplitSize,
- false);
- }
-
- public static MySqlBinlogSplit toSuspendedBinlogSplit(MySqlBinlogSplit normalBinlogSplit) {
- return new MySqlBinlogSplit(
- normalBinlogSplit.splitId,
- normalBinlogSplit.getStartingOffset(),
- normalBinlogSplit.getEndingOffset(),
- new ArrayList<>(),
- new HashMap<>(),
- normalBinlogSplit.getTotalFinishedSplitSize(),
- true);
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplitState.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplitState.java
similarity index 89%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplitState.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplitState.java
index 33fe2d2b5..6c2dac379 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlBinlogSplitState.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlBinlogSplitState.java
@@ -16,20 +16,25 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
+package org.apache.inlong.sort.cdc.mysql.source.split;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges.TableChange;
-import java.util.Map;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+
import javax.annotation.Nullable;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
+import java.util.Map;
-/** The state of split to describe the binlog of MySql table(s). */
+/**
+ * The state of split to describe the binlog of MySql table(s).
+ */
public class MySqlBinlogSplitState extends MySqlSplitState {
- @Nullable private BinlogOffset startingOffset;
- @Nullable private BinlogOffset endingOffset;
private final Map<TableId, TableChange> tableSchemas;
+ @Nullable
+ private BinlogOffset startingOffset;
+ @Nullable
+ private BinlogOffset endingOffset;
public MySqlBinlogSplitState(MySqlBinlogSplit split) {
super(split);
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlRecords.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlRecords.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlRecords.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlRecords.java
index 6171f58a8..0004ca0c1 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlRecords.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlRecords.java
@@ -16,14 +16,15 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
+package org.apache.inlong.sort.cdc.mysql.source.split;
+import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
+import org.apache.kafka.connect.source.SourceRecord;
+
+import javax.annotation.Nullable;
import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
-import javax.annotation.Nullable;
-import org.apache.flink.connector.base.source.reader.RecordsWithSplitIds;
-import org.apache.kafka.connect.source.SourceRecord;
/**
* An implementation of {@link RecordsWithSplitIds} which contains the records of one table split.
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplit.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplit.java
similarity index 89%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplit.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplit.java
index 61f068a39..1e3130004 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplit.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplit.java
@@ -16,31 +16,40 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
+package org.apache.inlong.sort.cdc.mysql.source.split;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges.TableChange;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+
+import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
-import javax.annotation.Nullable;
-import org.apache.flink.table.types.logical.RowType;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-/** The split to describe a split of a MySql table snapshot. */
+/**
+ * The split to describe a split of a MySql table snapshot.
+ */
public class MySqlSnapshotSplit extends MySqlSplit {
private final TableId tableId;
private final RowType splitKeyType;
private final Map<TableId, TableChange> tableSchemas;
- @Nullable private final Object[] splitStart;
- @Nullable private final Object[] splitEnd;
- /** The high watermark is not bull when the split read finished. */
- @Nullable private final BinlogOffset highWatermark;
+ @Nullable
+ private final Object[] splitStart;
+ @Nullable
+ private final Object[] splitEnd;
+ /**
+ * The high watermark is not bull when the split read finished.
+ */
+ @Nullable
+ private final BinlogOffset highWatermark;
- @Nullable transient byte[] serializedFormCache;
+ @Nullable
+ transient byte[] serializedFormCache;
public MySqlSnapshotSplit(
TableId tableId,
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplitState.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplitState.java
similarity index 87%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplitState.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplitState.java
index 8648deca7..226e14894 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSnapshotSplitState.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSnapshotSplitState.java
@@ -16,15 +16,19 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
+package org.apache.inlong.sort.cdc.mysql.source.split;
+
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
import javax.annotation.Nullable;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-/** The state of split to describe the binlog of MySql table(s). */
+/**
+ * The state of split to describe the binlog of MySql table(s).
+ */
public class MySqlSnapshotSplitState extends MySqlSplitState {
- @Nullable private BinlogOffset highWatermark;
+ @Nullable
+ private BinlogOffset highWatermark;
public MySqlSnapshotSplitState(MySqlSnapshotSplit split) {
super(split);
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplit.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplit.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplit.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplit.java
index 7b76cf437..a84f196bd 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplit.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplit.java
@@ -16,13 +16,14 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
+package org.apache.inlong.sort.cdc.mysql.source.split;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges;
+import org.apache.flink.api.connector.source.SourceSplit;
+
import java.util.Map;
import java.util.Objects;
-import org.apache.flink.api.connector.source.SourceSplit;
/** The split of table comes from a Table that splits by primary key. */
public abstract class MySqlSplit implements SourceSplit {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitSerializer.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitSerializer.java
similarity index 93%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitSerializer.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitSerializer.java
index 747e55765..18f7161e5 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitSerializer.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitSerializer.java
@@ -16,33 +16,36 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.readBinlogPosition;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.rowToSerializedString;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.serializedStringToRow;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.SerializerUtils.writeBinlogPosition;
+package org.apache.inlong.sort.cdc.mysql.source.split;
import io.debezium.document.Document;
import io.debezium.document.DocumentReader;
import io.debezium.document.DocumentWriter;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges.TableChange;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.core.memory.DataInputDeserializer;
+import org.apache.flink.core.memory.DataOutputSerializer;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.table.types.logical.utils.LogicalTypeParser;
+import org.apache.inlong.sort.cdc.debezium.history.FlinkJsonTableChangeSerializer;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.flink.core.io.SimpleVersionedSerializer;
-import org.apache.flink.core.memory.DataInputDeserializer;
-import org.apache.flink.core.memory.DataOutputSerializer;
-import org.apache.flink.table.types.logical.RowType;
-import org.apache.flink.table.types.logical.utils.LogicalTypeParser;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.history.FlinkJsonTableChangeSerializer;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-/** A serializer for the {@link MySqlSplit}. */
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.readBinlogPosition;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.rowToSerializedString;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.serializedStringToRow;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.SerializerUtils.writeBinlogPosition;
+
+/**
+ * A serializer for the {@link MySqlSplit}.
+ */
public final class MySqlSplitSerializer implements SimpleVersionedSerializer<MySqlSplit> {
public static final MySqlSplitSerializer INSTANCE = new MySqlSplitSerializer();
@@ -54,6 +57,83 @@ public final class MySqlSplitSerializer implements SimpleVersionedSerializer<MyS
private static final int SNAPSHOT_SPLIT_FLAG = 1;
private static final int BINLOG_SPLIT_FLAG = 2;
+ private static void writeTableSchemas(
+ Map<TableId, TableChange> tableSchemas, DataOutputSerializer out) throws IOException {
+ FlinkJsonTableChangeSerializer jsonSerializer = new FlinkJsonTableChangeSerializer();
+ DocumentWriter documentWriter = DocumentWriter.defaultWriter();
+ final int size = tableSchemas.size();
+ out.writeInt(size);
+ for (Map.Entry<TableId, TableChange> entry : tableSchemas.entrySet()) {
+ out.writeUTF(entry.getKey().toString());
+ final String tableChangeStr =
+ documentWriter.write(jsonSerializer.toDocument(entry.getValue()));
+ final byte[] tableChangeBytes = tableChangeStr.getBytes(StandardCharsets.UTF_8);
+ out.writeInt(tableChangeBytes.length);
+ out.write(tableChangeBytes);
+ }
+ }
+
+ private static Map<TableId, TableChange> readTableSchemas(int version, DataInputDeserializer in)
+ throws IOException {
+ DocumentReader documentReader = DocumentReader.defaultReader();
+ Map<TableId, TableChange> tableSchemas = new HashMap<>();
+ final int size = in.readInt();
+ for (int i = 0; i < size; i++) {
+ TableId tableId = TableId.parse(in.readUTF());
+ final String tableChangeStr;
+ switch (version) {
+ case 1:
+ tableChangeStr = in.readUTF();
+ break;
+ case 2:
+ case 3:
+ case 4:
+ final int len = in.readInt();
+ final byte[] bytes = new byte[len];
+ in.read(bytes);
+ tableChangeStr = new String(bytes, StandardCharsets.UTF_8);
+ break;
+ default:
+ throw new IOException("Unknown version: " + version);
+ }
+ Document document = documentReader.read(tableChangeStr);
+ TableChange tableChange = FlinkJsonTableChangeSerializer.fromDocument(document, true);
+ tableSchemas.put(tableId, tableChange);
+ }
+ return tableSchemas;
+ }
+
+ private static void writeFinishedSplitsInfo(
+ List<FinishedSnapshotSplitInfo> finishedSplitsInfo, DataOutputSerializer out)
+ throws IOException {
+ final int size = finishedSplitsInfo.size();
+ out.writeInt(size);
+ for (FinishedSnapshotSplitInfo splitInfo : finishedSplitsInfo) {
+ out.writeUTF(splitInfo.getTableId().toString());
+ out.writeUTF(splitInfo.getSplitId());
+ out.writeUTF(rowToSerializedString(splitInfo.getSplitStart()));
+ out.writeUTF(rowToSerializedString(splitInfo.getSplitEnd()));
+ writeBinlogPosition(splitInfo.getHighWatermark(), out);
+ }
+ }
+
+ private static List<FinishedSnapshotSplitInfo> readFinishedSplitsInfo(
+ int version, DataInputDeserializer in) throws IOException {
+ List<FinishedSnapshotSplitInfo> finishedSplitsInfo = new ArrayList<>();
+ final int size = in.readInt();
+ for (int i = 0; i < size; i++) {
+ TableId tableId = TableId.parse(in.readUTF());
+ String splitId = in.readUTF();
+ Object[] splitStart = serializedStringToRow(in.readUTF());
+ Object[] splitEnd = serializedStringToRow(in.readUTF());
+ BinlogOffset highWatermark = readBinlogPosition(version, in);
+ finishedSplitsInfo.add(
+ new FinishedSnapshotSplitInfo(
+ tableId, splitId, splitStart, splitEnd, highWatermark));
+ }
+ return finishedSplitsInfo;
+ }
+
@Override
public int getVersion() {
return VERSION;
@@ -179,81 +259,4 @@ public final class MySqlSplitSerializer implements SimpleVersionedSerializer<MyS
throw new IOException("Unknown split kind: " + splitKind);
}
}
-
- private static void writeTableSchemas(
- Map<TableId, TableChange> tableSchemas, DataOutputSerializer out) throws IOException {
- FlinkJsonTableChangeSerializer jsonSerializer = new FlinkJsonTableChangeSerializer();
- DocumentWriter documentWriter = DocumentWriter.defaultWriter();
- final int size = tableSchemas.size();
- out.writeInt(size);
- for (Map.Entry<TableId, TableChange> entry : tableSchemas.entrySet()) {
- out.writeUTF(entry.getKey().toString());
- final String tableChangeStr =
- documentWriter.write(jsonSerializer.toDocument(entry.getValue()));
- final byte[] tableChangeBytes = tableChangeStr.getBytes(StandardCharsets.UTF_8);
- out.writeInt(tableChangeBytes.length);
- out.write(tableChangeBytes);
- }
- }
-
- private static Map<TableId, TableChange> readTableSchemas(int version, DataInputDeserializer in)
- throws IOException {
- DocumentReader documentReader = DocumentReader.defaultReader();
- Map<TableId, TableChange> tableSchemas = new HashMap<>();
- final int size = in.readInt();
- for (int i = 0; i < size; i++) {
- TableId tableId = TableId.parse(in.readUTF());
- final String tableChangeStr;
- switch (version) {
- case 1:
- tableChangeStr = in.readUTF();
- break;
- case 2:
- case 3:
- case 4:
- final int len = in.readInt();
- final byte[] bytes = new byte[len];
- in.read(bytes);
- tableChangeStr = new String(bytes, StandardCharsets.UTF_8);
- break;
- default:
- throw new IOException("Unknown version: " + version);
- }
- Document document = documentReader.read(tableChangeStr);
- TableChange tableChange = FlinkJsonTableChangeSerializer.fromDocument(document, true);
- tableSchemas.put(tableId, tableChange);
- }
- return tableSchemas;
- }
-
- private static void writeFinishedSplitsInfo(
- List<FinishedSnapshotSplitInfo> finishedSplitsInfo, DataOutputSerializer out)
- throws IOException {
- final int size = finishedSplitsInfo.size();
- out.writeInt(size);
- for (FinishedSnapshotSplitInfo splitInfo : finishedSplitsInfo) {
- out.writeUTF(splitInfo.getTableId().toString());
- out.writeUTF(splitInfo.getSplitId());
- out.writeUTF(rowToSerializedString(splitInfo.getSplitStart()));
- out.writeUTF(rowToSerializedString(splitInfo.getSplitEnd()));
- writeBinlogPosition(splitInfo.getHighWatermark(), out);
- }
- }
-
- private static List<FinishedSnapshotSplitInfo> readFinishedSplitsInfo(
- int version, DataInputDeserializer in) throws IOException {
- List<FinishedSnapshotSplitInfo> finishedSplitsInfo = new ArrayList<>();
- final int size = in.readInt();
- for (int i = 0; i < size; i++) {
- TableId tableId = TableId.parse(in.readUTF());
- String splitId = in.readUTF();
- Object[] splitStart = serializedStringToRow(in.readUTF());
- Object[] splitEnd = serializedStringToRow(in.readUTF());
- BinlogOffset highWatermark = readBinlogPosition(version, in);
- finishedSplitsInfo.add(
- new FinishedSnapshotSplitInfo(
- tableId, splitId, splitStart, splitEnd, highWatermark));
- }
- return finishedSplitsInfo;
- }
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitState.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitState.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitState.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitState.java
index 7f0318b6a..845c1da26 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/split/MySqlSplitState.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/split/MySqlSplitState.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split;
+package org.apache.inlong.sort.cdc.mysql.source.split;
import org.apache.flink.annotation.Internal;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ChunkUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ChunkUtils.java
similarity index 90%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ChunkUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ChunkUtils.java
index c4a9e916e..462458868 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ChunkUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ChunkUtils.java
@@ -16,20 +16,23 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils;
-
-import static org.apache.flink.table.api.DataTypes.FIELD;
-import static org.apache.flink.table.api.DataTypes.ROW;
+package org.apache.inlong.sort.cdc.mysql.source.utils;
import io.debezium.relational.Column;
import io.debezium.relational.Table;
-import java.util.List;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.util.Preconditions;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema.MySqlTypeUtils;
+import org.apache.inlong.sort.cdc.mysql.schema.MySqlTypeUtils;
+
+import java.util.List;
-/** Utilities to split chunks of table. */
+import static org.apache.flink.table.api.DataTypes.FIELD;
+import static org.apache.flink.table.api.DataTypes.ROW;
+
+/**
+ * Utilities to split chunks of table.
+ */
public class ChunkUtils {
private ChunkUtils() {
@@ -69,7 +72,9 @@ public class ChunkUtils {
return primaryKeys.get(0);
}
- /** Returns next meta group id according to received meta number and meta group size. */
+ /**
+ * Returns next meta group id according to received meta number and meta group size.
+ */
public static int getNextMetaGroupId(int receivedMetaNum, int metaGroupSize) {
Preconditions.checkState(metaGroupSize > 0);
return receivedMetaNum % metaGroupSize == 0
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ObjectUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ObjectUtils.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ObjectUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ObjectUtils.java
index 26f024b85..9a46310a1 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/ObjectUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/ObjectUtils.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils;
+package org.apache.inlong.sort.cdc.mysql.source.utils;
import java.math.BigDecimal;
import java.math.BigInteger;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/RecordUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/RecordUtils.java
similarity index 89%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/RecordUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/RecordUtils.java
index d542133d5..5e944c828 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/RecordUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/RecordUtils.java
@@ -16,13 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils;
-
-import static org.apache.flink.util.Preconditions.checkState;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.EventDispatcherImpl.HISTORY_RECORD_FIELD;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.SIGNAL_EVENT_VALUE_SCHEMA_NAME;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.SPLIT_ID_KEY;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.WATERMARK_KIND;
+package org.apache.inlong.sort.cdc.mysql.source.utils;
import io.debezium.connector.AbstractSourceInfo;
import io.debezium.data.Envelope;
@@ -30,6 +24,15 @@ import io.debezium.document.DocumentReader;
import io.debezium.relational.TableId;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.util.SchemaNameAdjuster;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
+import org.apache.inlong.sort.cdc.mysql.source.split.MySqlSnapshotSplit;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.source.SourceRecord;
+
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
@@ -42,22 +45,17 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
-import org.apache.flink.table.types.logical.RowType;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.WatermarkKind;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.debezium.reader.DebeziumReader;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.FinishedSnapshotSplitInfo;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.split.MySqlSnapshotSplit;
-import org.apache.kafka.connect.data.Schema;
-import org.apache.kafka.connect.data.Struct;
-import org.apache.kafka.connect.source.SourceRecord;
-/** Utility class to deal record. */
-public class RecordUtils {
-
- private RecordUtils() {
+import static org.apache.flink.util.Preconditions.checkState;
+import static org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.EventDispatcherImpl.HISTORY_RECORD_FIELD;
+import static org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.SIGNAL_EVENT_VALUE_SCHEMA_NAME;
+import static org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.SPLIT_ID_KEY;
+import static org.apache.inlong.sort.cdc.mysql.debezium.dispatcher.SignalEventDispatcher.WATERMARK_KIND;
- }
+/**
+ * Utility class to deal record.
+ */
+public class RecordUtils {
public static final String SCHEMA_CHANGE_EVENT_KEY_NAME =
"io.debezium.connector.mysql.SchemaChangeKey";
@@ -65,7 +63,13 @@ public class RecordUtils {
"io.debezium.connector.common.Heartbeat";
private static final DocumentReader DOCUMENT_READER = DocumentReader.defaultReader();
- /** Converts a {@link ResultSet} row to an array of Objects. */
+ private RecordUtils() {
+
+ }
+
+ /**
+ * Converts a {@link ResultSet} row to an array of Objects.
+ */
public static Object[] rowToArray(ResultSet rs, int size) throws SQLException {
final Object[] row = new Object[size];
for (int i = 0; i < size; i++) {
@@ -225,29 +229,29 @@ public class RecordUtils {
}
public static boolean isWatermarkEvent(SourceRecord record) {
- Optional<WatermarkKind> watermarkKind = getWatermarkKind(record);
+ Optional<SignalEventDispatcher.WatermarkKind> watermarkKind = getWatermarkKind(record);
return watermarkKind.isPresent();
}
public static boolean isLowWatermarkEvent(SourceRecord record) {
- Optional<WatermarkKind> watermarkKind = getWatermarkKind(record);
- if (watermarkKind.isPresent() && watermarkKind.get() == WatermarkKind.LOW) {
+ Optional<SignalEventDispatcher.WatermarkKind> watermarkKind = getWatermarkKind(record);
+ if (watermarkKind.isPresent() && watermarkKind.get() == SignalEventDispatcher.WatermarkKind.LOW) {
return true;
}
return false;
}
public static boolean isHighWatermarkEvent(SourceRecord record) {
- Optional<WatermarkKind> watermarkKind = getWatermarkKind(record);
- if (watermarkKind.isPresent() && watermarkKind.get() == WatermarkKind.HIGH) {
+ Optional<SignalEventDispatcher.WatermarkKind> watermarkKind = getWatermarkKind(record);
+ if (watermarkKind.isPresent() && watermarkKind.get() == SignalEventDispatcher.WatermarkKind.HIGH) {
return true;
}
return false;
}
public static boolean isEndWatermarkEvent(SourceRecord record) {
- Optional<WatermarkKind> watermarkKind = getWatermarkKind(record);
- if (watermarkKind.isPresent() && watermarkKind.get() == WatermarkKind.BINLOG_END) {
+ Optional<SignalEventDispatcher.WatermarkKind> watermarkKind = getWatermarkKind(record);
+ if (watermarkKind.isPresent() && watermarkKind.get() == SignalEventDispatcher.WatermarkKind.BINLOG_END) {
return true;
}
return false;
@@ -311,7 +315,7 @@ public class RecordUtils {
* Return the finished snapshot split information.
*
* @return [splitId, splitStart, splitEnd, highWatermark], the information will be used to
- * filter binlog events when read binlog of table.
+ * filter binlog events when read binlog of table.
*/
public static FinishedSnapshotSplitInfo getSnapshotSplitInfo(
MySqlSnapshotSplit split, SourceRecord highWatermark) {
@@ -325,7 +329,9 @@ public class RecordUtils {
getBinlogPosition(highWatermark.sourceOffset()));
}
- /** Returns the start offset of the binlog split. */
+ /**
+ * Returns the start offset of the binlog split.
+ */
public static BinlogOffset getStartingOffsetOfBinlogSplit(
List<FinishedSnapshotSplitInfo> finishedSnapshotSplits) {
BinlogOffset startOffset =
@@ -360,7 +366,7 @@ public class RecordUtils {
// the split key field contains single field now
String splitFieldName = nameAdjuster.adjust(splitBoundaryType.getFieldNames().get(0));
Struct key = (Struct) dataRecord.key();
- return new Object[] {key.get(splitFieldName)};
+ return new Object[]{key.get(splitFieldName)};
}
public static BinlogOffset getBinlogPosition(SourceRecord dataRecord) {
@@ -376,7 +382,9 @@ public class RecordUtils {
return new BinlogOffset(offsetStrMap);
}
- /** Returns the specific key contains in the split key range or not. */
+ /**
+ * Returns the specific key contains in the split key range or not.
+ */
public static boolean splitKeyRangeContains(
Object[] key, Object[] splitKeyStart, Object[] splitKeyEnd) {
// for all range
@@ -406,7 +414,7 @@ public class RecordUtils {
}
return Arrays.stream(lowerBoundRes).anyMatch(value -> value >= 0)
&& (Arrays.stream(upperBoundRes).anyMatch(value -> value < 0)
- && Arrays.stream(upperBoundRes).allMatch(value -> value <= 0));
+ && Arrays.stream(upperBoundRes).allMatch(value -> value <= 0));
}
}
@@ -424,11 +432,11 @@ public class RecordUtils {
return new HistoryRecord(DOCUMENT_READER.read(historyRecordStr));
}
- private static Optional<WatermarkKind> getWatermarkKind(SourceRecord record) {
+ private static Optional<SignalEventDispatcher.WatermarkKind> getWatermarkKind(SourceRecord record) {
if (record.valueSchema() != null
&& SIGNAL_EVENT_VALUE_SCHEMA_NAME.equals(record.valueSchema().name())) {
Struct value = (Struct) record.value();
- return Optional.of(WatermarkKind.valueOf(value.getString(WATERMARK_KIND)));
+ return Optional.of(SignalEventDispatcher.WatermarkKind.valueOf(value.getString(WATERMARK_KIND)));
}
return Optional.empty();
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/SerializerUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/SerializerUtils.java
similarity index 86%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/SerializerUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/SerializerUtils.java
index 2f8da42a3..cf190cfcf 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/SerializerUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/SerializerUtils.java
@@ -16,21 +16,24 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils;
+package org.apache.inlong.sort.cdc.mysql.source.utils;
import io.debezium.DebeziumException;
import io.debezium.util.HexConverter;
+import org.apache.flink.core.memory.DataInputDeserializer;
+import org.apache.flink.core.memory.DataOutputSerializer;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffset;
+import org.apache.inlong.sort.cdc.mysql.source.offset.BinlogOffsetSerializer;
+
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
-import org.apache.flink.core.memory.DataInputDeserializer;
-import org.apache.flink.core.memory.DataOutputSerializer;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffset;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.offset.BinlogOffsetSerializer;
-/** Utils for serialization and deserialization. */
+/**
+ * Utils for serialization and deserialization.
+ */
public class SerializerUtils {
private SerializerUtils() {
@@ -74,7 +77,7 @@ public class SerializerUtils {
public static String rowToSerializedString(Object[] splitBoundary) {
try (final ByteArrayOutputStream bos = new ByteArrayOutputStream();
- ObjectOutputStream oos = new ObjectOutputStream(bos)) {
+ ObjectOutputStream oos = new ObjectOutputStream(bos)) {
oos.writeObject(splitBoundary);
return HexConverter.convertToHexString(bos.toByteArray());
} catch (IOException e) {
@@ -85,8 +88,8 @@ public class SerializerUtils {
public static Object[] serializedStringToRow(String serialized) {
try (final ByteArrayInputStream bis =
- new ByteArrayInputStream(HexConverter.convertFromHex(serialized));
- ObjectInputStream ois = new ObjectInputStream(bis)) {
+ new ByteArrayInputStream(HexConverter.convertFromHex(serialized));
+ ObjectInputStream ois = new ObjectInputStream(bis)) {
return (Object[]) ois.readObject();
} catch (Exception e) {
throw new DebeziumException(
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/StatementUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/StatementUtils.java
similarity index 97%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/StatementUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/StatementUtils.java
index d7ad7637a..56ba0881e 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/StatementUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/StatementUtils.java
@@ -16,21 +16,24 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.RecordUtils.rowToArray;
+package org.apache.inlong.sort.cdc.mysql.source.utils;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.TableId;
+import org.apache.flink.table.types.logical.RowType;
+
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Iterator;
import java.util.Optional;
import java.util.stream.Collectors;
-import org.apache.flink.table.types.logical.RowType;
-/** Utils to prepare SQL statement. */
+import static org.apache.inlong.sort.cdc.mysql.source.utils.RecordUtils.rowToArray;
+
+/**
+ * Utils to prepare SQL statement.
+ */
public class StatementUtils {
private StatementUtils() {
@@ -269,7 +272,7 @@ public class StatementUtils {
private static void addPrimaryKeyColumnsToCondition(
RowType pkRowType, StringBuilder sql, String predicate) {
for (Iterator<String> fieldNamesIt = pkRowType.getFieldNames().iterator();
- fieldNamesIt.hasNext(); ) {
+ fieldNamesIt.hasNext(); ) {
sql.append(fieldNamesIt.next()).append(predicate);
if (fieldNamesIt.hasNext()) {
sql.append(" AND ");
@@ -280,7 +283,7 @@ public class StatementUtils {
private static String getPrimaryKeyColumnsProjection(RowType pkRowType) {
StringBuilder sql = new StringBuilder();
for (Iterator<String> fieldNamesIt = pkRowType.getFieldNames().iterator();
- fieldNamesIt.hasNext(); ) {
+ fieldNamesIt.hasNext(); ) {
sql.append(fieldNamesIt.next());
if (fieldNamesIt.hasNext()) {
sql.append(" , ");
@@ -292,7 +295,7 @@ public class StatementUtils {
private static String getMaxPrimaryKeyColumnsProjection(RowType pkRowType) {
StringBuilder sql = new StringBuilder();
for (Iterator<String> fieldNamesIt = pkRowType.getFieldNames().iterator();
- fieldNamesIt.hasNext(); ) {
+ fieldNamesIt.hasNext(); ) {
sql.append("MAX(" + fieldNamesIt.next() + ")");
if (fieldNamesIt.hasNext()) {
sql.append(" , ");
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/TableDiscoveryUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/TableDiscoveryUtils.java
similarity index 91%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/TableDiscoveryUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/TableDiscoveryUtils.java
index a1a1a4d02..84c034263 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/source/utils/TableDiscoveryUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/source/utils/TableDiscoveryUtils.java
@@ -16,27 +16,30 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils;
-
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.StatementUtils.quote;
+package org.apache.inlong.sort.cdc.mysql.source.utils;
import io.debezium.connector.mysql.MySqlConnection;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.relational.RelationalTableFilters;
import io.debezium.relational.TableId;
import io.debezium.relational.history.TableChanges;
+import org.apache.flink.util.FlinkRuntimeException;
+import org.apache.inlong.sort.cdc.mysql.schema.MySqlSchema;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.flink.util.FlinkRuntimeException;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.schema.MySqlSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-/** Utilities to discovery matched tables. */
+import static org.apache.inlong.sort.cdc.mysql.source.utils.StatementUtils.quote;
+
+/**
+ * Utilities to discovery matched tables.
+ */
public class TableDiscoveryUtils {
private static final Logger LOG = LoggerFactory.getLogger(TableDiscoveryUtils.class);
@@ -111,7 +114,7 @@ public class TableDiscoveryUtils {
throw new IllegalArgumentException(
String.format(
"Can't find any matched tables, please check your "
- + "configured database-name: %s and table-name: %s",
+ + "configured database-name: %s and table-name: %s",
sourceConfig.getDatabaseList(), sourceConfig.getTableList()));
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/JdbcUrlUtils.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/JdbcUrlUtils.java
similarity index 96%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/JdbcUrlUtils.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/JdbcUrlUtils.java
index fdf83b600..d2f25222b 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/JdbcUrlUtils.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/JdbcUrlUtils.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.table;
+package org.apache.inlong.sort.cdc.mysql.table;
import java.util.Map;
import java.util.Properties;
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlDeserializationConverterFactory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlDeserializationConverterFactory.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlDeserializationConverterFactory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlDeserializationConverterFactory.java
index dd52f09ad..54a63994d 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlDeserializationConverterFactory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlDeserializationConverterFactory.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.table;
+package org.apache.inlong.sort.cdc.mysql.table;
import com.esri.core.geometry.ogc.OGCGeometry;
import com.fasterxml.jackson.databind.JsonNode;
@@ -25,23 +25,26 @@ import com.fasterxml.jackson.databind.ObjectWriter;
import io.debezium.data.EnumSet;
import io.debezium.data.geometry.Geometry;
import io.debezium.data.geometry.Point;
-import java.nio.ByteBuffer;
-import java.time.ZoneId;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
import org.apache.flink.table.data.GenericArrayData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.ArrayType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeFamily;
import org.apache.flink.table.types.logical.utils.LogicalTypeChecks;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.table.DeserializationRuntimeConverter;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.table.DeserializationRuntimeConverterFactory;
+import org.apache.inlong.sort.cdc.debezium.table.DeserializationRuntimeConverter;
+import org.apache.inlong.sort.cdc.debezium.table.DeserializationRuntimeConverterFactory;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
-/** Used to create {@link DeserializationRuntimeConverterFactory} specified to MySQL. */
+import java.nio.ByteBuffer;
+import java.time.ZoneId;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * Used to create {@link DeserializationRuntimeConverterFactory} specified to MySQL.
+ */
public class MySqlDeserializationConverterFactory {
/**
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlReadableMetadata.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlReadableMetadata.java
similarity index 79%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlReadableMetadata.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlReadableMetadata.java
index e19f21dba..b80094c62 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlReadableMetadata.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlReadableMetadata.java
@@ -16,14 +16,13 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.table;
+package org.apache.inlong.sort.cdc.mysql.table;
import io.debezium.connector.AbstractSourceInfo;
import io.debezium.data.Envelope;
import io.debezium.data.Envelope.FieldName;
import io.debezium.relational.Table;
import io.debezium.relational.history.TableChanges;
-import javax.annotation.Nullable;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.data.GenericArrayData;
@@ -33,11 +32,12 @@ import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.flink.table.types.DataType;
+import org.apache.inlong.sort.cdc.debezium.table.MetadataConverter;
import org.apache.inlong.sort.formats.json.canal.CanalJson;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.table.MetadataConverter;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
+import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -90,62 +90,62 @@ public enum MySqlReadableMetadata {
@Override
public Object read(SourceRecord record) {
Struct messageStruct = (Struct) record.value();
- Struct sourceStruct = messageStruct.getStruct(Envelope.FieldName.SOURCE);
+ Struct sourceStruct = messageStruct.getStruct(FieldName.SOURCE);
return TimestampData.fromEpochMillis(
(Long) sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY));
}
}),
DATA(
- "meta.data",
- DataTypes.STRING(),
- new MetadataConverter() {
- private static final long serialVersionUID = 1L;
-
- @Override
- public Object read(SourceRecord record) {
- record.value().toString();
- Struct messageStruct = (Struct) record.value();
- Struct sourceStruct = messageStruct.getStruct(FieldName.TIMESTAMP);
- sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY);
- return TimestampData.fromEpochMillis(
- (Long) sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY));
- }
-
- @Override
- public Object read(SourceRecord record,
- @Nullable TableChanges.TableChange tableSchema, RowData rowData) {
- // construct canal json
- Struct messageStruct = (Struct) record.value();
- Struct sourceStruct = messageStruct.getStruct(FieldName.SOURCE);
- // tableName
- String tableName = getMetaData(record, AbstractSourceInfo.TABLE_NAME_KEY);
- // databaseName
- String databaseName = getMetaData(record, AbstractSourceInfo.DATABASE_NAME_KEY);
- // opTs
- long opTs = (Long) sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY);
- // ts
- long ts = (Long) messageStruct.get(Envelope.FieldName.TIMESTAMP);
- // actual data
- GenericRowData data = (GenericRowData) rowData;
- Map<String, Object> field = (Map<String, Object>) data.getField(0);
- List<Map<String, Object>> dataList = new ArrayList<>();
- dataList.add(field);
-
- CanalJson canalJson = CanalJson.builder()
- .data(dataList).database(databaseName)
- .sql("").es(opTs).isDdl(false).pkNames(getPkNames(tableSchema))
- .mysqlType(getMysqlType(tableSchema)).table(tableName).ts(ts)
- .type(getOpType(record)).build();
-
- try {
- ObjectMapper objectMapper = new ObjectMapper();
- return StringData.fromString(objectMapper.writeValueAsString(canalJson));
- } catch (Exception e) {
- throw new IllegalStateException("exception occurs when get meta data", e);
+ "meta.data",
+ DataTypes.STRING(),
+ new MetadataConverter() {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Object read(SourceRecord record) {
+ record.value().toString();
+ Struct messageStruct = (Struct) record.value();
+ Struct sourceStruct = messageStruct.getStruct(FieldName.TIMESTAMP);
+ sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY);
+ return TimestampData.fromEpochMillis(
+ (Long) sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY));
}
- }
- }),
+
+ @Override
+ public Object read(SourceRecord record,
+ @Nullable TableChanges.TableChange tableSchema, RowData rowData) {
+ // construct canal json
+ Struct messageStruct = (Struct) record.value();
+ Struct sourceStruct = messageStruct.getStruct(FieldName.SOURCE);
+ // tableName
+ String tableName = getMetaData(record, AbstractSourceInfo.TABLE_NAME_KEY);
+ // databaseName
+ String databaseName = getMetaData(record, AbstractSourceInfo.DATABASE_NAME_KEY);
+ // opTs
+ long opTs = (Long) sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY);
+ // ts
+ long ts = (Long) messageStruct.get(FieldName.TIMESTAMP);
+ // actual data
+ GenericRowData data = (GenericRowData) rowData;
+ Map<String, Object> field = (Map<String, Object>) data.getField(0);
+ List<Map<String, Object>> dataList = new ArrayList<>();
+ dataList.add(field);
+
+ CanalJson canalJson = CanalJson.builder()
+ .data(dataList).database(databaseName)
+ .sql("").es(opTs).isDdl(false).pkNames(getPkNames(tableSchema))
+ .mysqlType(getMysqlType(tableSchema)).table(tableName).ts(ts)
+ .type(getOpType(record)).build();
+
+ try {
+ ObjectMapper objectMapper = new ObjectMapper();
+ return StringData.fromString(objectMapper.writeValueAsString(canalJson));
+ } catch (Exception e) {
+ throw new IllegalStateException("exception occurs when get meta data", e);
+ }
+ }
+ }),
/**
* Name of the table that contain the row. .
@@ -190,7 +190,7 @@ public enum MySqlReadableMetadata {
@Override
public Object read(SourceRecord record) {
Struct messageStruct = (Struct) record.value();
- Struct sourceStruct = messageStruct.getStruct(Envelope.FieldName.SOURCE);
+ Struct sourceStruct = messageStruct.getStruct(FieldName.SOURCE);
return TimestampData.fromEpochMillis(
(Long) sourceStruct.get(AbstractSourceInfo.TIMESTAMP_KEY));
}
@@ -249,10 +249,10 @@ public enum MySqlReadableMetadata {
OLD(
"meta.update_before",
DataTypes.ARRAY(
- DataTypes.MAP(
- DataTypes.STRING().nullable(),
- DataTypes.STRING().nullable())
- .nullable())
+ DataTypes.MAP(
+ DataTypes.STRING().nullable(),
+ DataTypes.STRING().nullable())
+ .nullable())
.nullable(),
new MetadataConverter() {
private static final long serialVersionUID = 1L;
@@ -379,10 +379,20 @@ public enum MySqlReadableMetadata {
public Object read(SourceRecord record) {
Struct messageStruct = (Struct) record.value();
return TimestampData.fromEpochMillis(
- (Long) messageStruct.get(Envelope.FieldName.TIMESTAMP));
+ (Long) messageStruct.get(FieldName.TIMESTAMP));
}
});
+ private final String key;
+ private final DataType dataType;
+ private final MetadataConverter converter;
+
+ MySqlReadableMetadata(String key, DataType dataType, MetadataConverter converter) {
+ this.key = key;
+ this.dataType = dataType;
+ this.converter = converter;
+ }
+
private static String getOpType(SourceRecord record) {
String opType;
final Envelope.Operation op = Envelope.operationFor(record);
@@ -410,15 +420,15 @@ public enum MySqlReadableMetadata {
Map<String, String> mysqlType = new HashMap<>();
final Table table = tableSchema.getTable();
table.columns()
- .forEach(
- column -> {
- mysqlType.put(
- column.name(),
- String.format(
- "%s(%d)",
- column.typeName(),
- column.length()));
- });
+ .forEach(
+ column -> {
+ mysqlType.put(
+ column.name(),
+ String.format(
+ "%s(%d)",
+ column.typeName(),
+ column.length()));
+ });
return mysqlType;
}
@@ -428,18 +438,6 @@ public enum MySqlReadableMetadata {
return sourceStruct.getString(tableNameKey);
}
- private final String key;
-
- private final DataType dataType;
-
- private final MetadataConverter converter;
-
- MySqlReadableMetadata(String key, DataType dataType, MetadataConverter converter) {
- this.key = key;
- this.dataType = dataType;
- this.converter = converter;
- }
-
public String getKey() {
return key;
}
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableInlongSourceFactory.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableInlongSourceFactory.java
similarity index 77%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableInlongSourceFactory.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableInlongSourceFactory.java
index 9d4382e29..20876ed98 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableInlongSourceFactory.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableInlongSourceFactory.java
@@ -16,42 +16,8 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.table;
+package org.apache.inlong.sort.cdc.mysql.table;
-import static org.apache.flink.util.Preconditions.checkState;
-import static org.apache.inlong.sort.singletenant.flink.cdc.debezium.table.DebeziumOptions.getDebeziumProperties;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.APPEND_MODE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CHUNK_META_GROUP_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CONNECTION_POOL_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_MAX_RETRIES;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_TIMEOUT;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.DATABASE_NAME;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.HEARTBEAT_INTERVAL;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.HOSTNAME;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.MIGRATE_ALL;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.PASSWORD;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.PORT;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_INCREMENTAL_SNAPSHOT_CHUNK_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_INCREMENTAL_SNAPSHOT_ENABLED;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_NEWLY_ADDED_TABLE_ENABLED;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_SNAPSHOT_FETCH_SIZE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_MODE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_SPECIFIC_OFFSET_FILE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_SPECIFIC_OFFSET_POS;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_TIMESTAMP_MILLIS;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SERVER_ID;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SERVER_TIME_ZONE;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_LOWER_BOUND;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_UPPER_BOUND;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.TABLE_NAME;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions.USERNAME;
-import static org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.utils.ObjectUtils.doubleCompare;
-
-import java.time.Duration;
-import java.time.ZoneId;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.regex.Pattern;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.ValidationException;
@@ -60,9 +26,44 @@ import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;
import org.apache.flink.util.Preconditions;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.table.DebeziumOptions;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.MySqlSourceOptions;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.config.ServerIdRange;
+import org.apache.inlong.sort.cdc.debezium.table.DebeziumOptions;
+import org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions;
+import org.apache.inlong.sort.cdc.mysql.source.config.ServerIdRange;
+
+import java.time.Duration;
+import java.time.ZoneId;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+import static org.apache.flink.util.Preconditions.checkState;
+import static org.apache.inlong.sort.cdc.debezium.table.DebeziumOptions.getDebeziumProperties;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.APPEND_MODE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CHUNK_META_GROUP_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CONNECTION_POOL_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_MAX_RETRIES;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.CONNECT_TIMEOUT;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.DATABASE_NAME;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.HEARTBEAT_INTERVAL;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.HOSTNAME;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.MIGRATE_ALL;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.PASSWORD;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.PORT;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_INCREMENTAL_SNAPSHOT_CHUNK_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_INCREMENTAL_SNAPSHOT_ENABLED;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_NEWLY_ADDED_TABLE_ENABLED;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_SNAPSHOT_FETCH_SIZE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_MODE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_SPECIFIC_OFFSET_FILE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_SPECIFIC_OFFSET_POS;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SCAN_STARTUP_TIMESTAMP_MILLIS;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SERVER_ID;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SERVER_TIME_ZONE;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_LOWER_BOUND;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.SPLIT_KEY_EVEN_DISTRIBUTION_FACTOR_UPPER_BOUND;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.TABLE_NAME;
+import static org.apache.inlong.sort.cdc.mysql.source.config.MySqlSourceOptions.USERNAME;
+import static org.apache.inlong.sort.cdc.mysql.source.utils.ObjectUtils.doubleCompare;
/**
* Factory for creating configured instance of {@link MySqlTableSource}.
@@ -279,7 +280,7 @@ public class MySqlTableInlongSourceFactory implements DynamicTableSourceFactory
* Checks the given regular expression's syntax is valid.
*
* @param optionName the option name of the regex
- * @param regex The regular expression to be checked
+ * @param regex The regular expression to be checked
* @throws ValidationException If the expression's syntax is invalid
*/
private void validateRegex(String optionName, String regex) {
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableSource.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableSource.java
similarity index 95%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableSource.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableSource.java
index b4fd3f01e..8e0d76013 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/MySqlTableSource.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/MySqlTableSource.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.table;
+package org.apache.inlong.sort.cdc.mysql.table;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.catalog.ResolvedSchema;
@@ -30,11 +30,11 @@ import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.types.RowKind;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumDeserializationSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.DebeziumSourceFunction;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.table.MetadataConverter;
-import org.apache.inlong.sort.singletenant.flink.cdc.debezium.table.RowDataDebeziumDeserializeSchema;
-import org.apache.inlong.sort.singletenant.flink.cdc.mysql.source.MySqlSource;
+import org.apache.inlong.sort.cdc.debezium.DebeziumDeserializationSchema;
+import org.apache.inlong.sort.cdc.debezium.DebeziumSourceFunction;
+import org.apache.inlong.sort.cdc.debezium.table.MetadataConverter;
+import org.apache.inlong.sort.cdc.debezium.table.RowDataDebeziumDeserializeSchema;
+import org.apache.inlong.sort.cdc.mysql.source.MySqlSource;
import javax.annotation.Nullable;
import java.time.Duration;
@@ -121,7 +121,7 @@ public class MySqlTableSource implements ScanTableSource, SupportsReadingMetadat
boolean appendSource,
StartupOptions startupOptions,
Duration heartbeatInterval,
- boolean migrateAll) {
+ boolean migrateAll) {
this(
physicalSchema,
port,
@@ -147,7 +147,7 @@ public class MySqlTableSource implements ScanTableSource, SupportsReadingMetadat
false,
new Properties(),
heartbeatInterval,
- migrateAll);
+ migrateAll);
}
/**
@@ -178,7 +178,7 @@ public class MySqlTableSource implements ScanTableSource, SupportsReadingMetadat
boolean scanNewlyAddedTableEnabled,
Properties jdbcProperties,
Duration heartbeatInterval,
- boolean migrateAll) {
+ boolean migrateAll) {
this.physicalSchema = physicalSchema;
this.port = port;
this.hostname = checkNotNull(hostname);
@@ -238,7 +238,7 @@ public class MySqlTableSource implements ScanTableSource, SupportsReadingMetadat
.setAppendSource(appendSource)
.setUserDefinedConverterFactory(
MySqlDeserializationConverterFactory.instance())
- .setMigrateAll(migrateAll)
+ .setMigrateAll(migrateAll)
.build();
if (enableParallelRead) {
MySqlSource<RowData> parallelSource =
@@ -268,8 +268,8 @@ public class MySqlTableSource implements ScanTableSource, SupportsReadingMetadat
.build();
return SourceProvider.of(parallelSource);
} else {
- org.apache.inlong.sort.singletenant.flink.cdc.mysql.MySqlSource.Builder<RowData> builder =
- org.apache.inlong.sort.singletenant.flink.cdc.mysql.MySqlSource.<RowData>builder()
+ org.apache.inlong.sort.cdc.mysql.MySqlSource.Builder<RowData> builder =
+ org.apache.inlong.sort.cdc.mysql.MySqlSource.<RowData>builder()
.hostname(hostname)
.port(port)
.databaseList(database)
diff --git a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/OldFieldMetadataConverter.java b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/OldFieldMetadataConverter.java
similarity index 98%
rename from inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/OldFieldMetadataConverter.java
rename to inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/OldFieldMetadataConverter.java
index 2fb65508a..21a9af792 100644
--- a/inlong-sort/sort-single-tenant/src/main/java/org/apache/inlong/sort/singletenant/flink/cdc/mysql/table/OldFieldMetadataConverter.java
+++ b/inlong-sort/sort-connectors/mysql-cdc/src/main/java/org/apache/inlong/sort/cdc/mysql/table/OldFieldMetadataConverter.java
@@ -16,11 +16,8 @@
* limitations under the License.
*/
-package org.apache.inlong.sort.singletenant.flink.cdc.mysql.table;
+package org.apache.inlong.sort.cdc.mysql.table;
-import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE;
-
-import com.ververica.cdc.debezium.utils.TemporalConversions;
import io.debezium.data.Envelope;
import io.debezium.data.SpecialValueDecimal;
import io.debezium.data.VariableScaleDecimal;
@@ -29,20 +26,6 @@ import io.debezium.time.MicroTimestamp;
import io.debezium.time.NanoTime;
import io.debezium.time.NanoTimestamp;
import io.debezium.time.Timestamp;
-import java.io.Serializable;
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.time.Instant;
-import java.time.LocalDateTime;
-import java.time.LocalTime;
-import java.time.ZoneId;
-import java.time.ZoneOffset;
-import java.time.format.DateTimeFormatter;
-import java.time.format.DateTimeFormatterBuilder;
-import java.time.temporal.ChronoField;
-import java.util.Base64;
... 33410 lines suppressed ...