Posted to notifications@shardingsphere.apache.org by zh...@apache.org on 2023/06/29 11:58:14 UTC

[shardingsphere] branch master updated: Expand wildcard in single table configuration (#26676)

This is an automated email from the ASF dual-hosted git repository.

zhaojinchao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/shardingsphere.git


The following commit(s) were added to refs/heads/master by this push:
     new 66a9d263b23 Expand wildcard in single table configuration (#26676)
66a9d263b23 is described below

commit 66a9d263b23b169a2bd6003d25b271c69c35868b
Author: Raigor <ra...@gmail.com>
AuthorDate: Thu Jun 29 19:58:06 2023 +0800

    Expand wildcard in single table configuration (#26676)
    
    * Expand wildcard in single table configuration
    
    * Update comment
    
    * Fix RDL E2E
    
    * Fix transaction E2E
    
    * Fix SQL E2E
    
    * Update pipeline E2E
    
    * Update pipeline E2E
    
    * Merge master.
    
    * Skip dropping the database if it does not exist.
    
    * Update single configuration subscriber
    
    * Add single rule to pipeline data source.
---
 .../rule/decorator/RuleConfigurationDecorator.java |  47 ++++++
 .../shardingsphere/infra/datanode/DataNode.java    |  19 ++-
 .../ShardingSpherePipelineDataSourceCreator.java   |  11 ++
 .../metadata/persist/MetaDataPersistService.java   |  19 ++-
 .../persist/NewMetaDataPersistService.java         |  19 ++-
 .../SingleRuleConfigurationDecorator.java          | 182 +++++++++++++++++++++
 ...onfig.rule.decorator.RuleConfigurationDecorator |  18 ++
 .../update/LoadSingleTableStatementUpdater.java    |  26 +--
 .../database/DropDatabaseBackendHandler.java       |   4 +-
 .../rdl/rule/NewRuleDefinitionBackendHandler.java  |  15 +-
 .../rdl/rule/RuleDefinitionBackendHandler.java     |  16 +-
 .../general/MySQLTimeTypesMigrationE2EIT.java      |   2 +-
 .../migration/general/RulesMigrationE2EIT.java     |   2 +-
 .../proxy/conf/opengauss/config-sharding.yaml      |   2 +-
 .../cursor/proxy/conf/postgresql/rule.yaml         |   2 +-
 .../default/proxy/conf/mysql/config-sharding.yaml  |   3 +
 .../proxy/conf/opengauss/config-sharding.yaml      |   2 +-
 .../proxy/conf/postgresql/config-sharding.yaml     |   3 +
 .../conf/opengauss/config-readwrite-splitting.yaml |   2 +-
 .../postgresql/config-readwrite-splitting.yaml     |   2 +-
 .../resources/cases/rdl/rdl-integration-create.xml |   1 +
 21 files changed, 361 insertions(+), 36 deletions(-)

diff --git a/infra/common/src/main/java/org/apache/shardingsphere/infra/config/rule/decorator/RuleConfigurationDecorator.java b/infra/common/src/main/java/org/apache/shardingsphere/infra/config/rule/decorator/RuleConfigurationDecorator.java
new file mode 100644
index 00000000000..006f50223bf
--- /dev/null
+++ b/infra/common/src/main/java/org/apache/shardingsphere/infra/config/rule/decorator/RuleConfigurationDecorator.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.shardingsphere.infra.config.rule.decorator;
+
+import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
+import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
+import org.apache.shardingsphere.infra.util.spi.annotation.SingletonSPI;
+import org.apache.shardingsphere.infra.util.spi.type.typed.TypedSPI;
+
+import javax.sql.DataSource;
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Rule configuration decorator.
+ * 
+ * @param <T> type of rule configuration
+ */
+@SingletonSPI
+public interface RuleConfigurationDecorator<T extends RuleConfiguration> extends TypedSPI {
+    
+    /**
+     * Decorate rule configuration.
+     * 
+     * @param databaseName database name
+     * @param dataSources data sources
+     * @param builtRules built rules
+     * @param ruleConfig rule configuration to be decorated
+     * @return decorated rule configuration
+     */
+    T decorate(String databaseName, Map<String, DataSource> dataSources, Collection<ShardingSphereRule> builtRules, T ruleConfig);
+}
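
A note on the extension point above: RuleConfigurationDecorator is a TypedSPI keyed by the rule configuration class name and looked up through TypedSPILoader before rule configurations are persisted or altered (see MetaDataPersistService and the RDL backend handlers later in this patch). A minimal sketch of a custom implementation could look like the following; the class and package names are illustrative only and not part of this commit:

    package com.example.decorator; // hypothetical package, for illustration only

    import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
    import org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator;
    import org.apache.shardingsphere.infra.rule.ShardingSphereRule;

    import javax.sql.DataSource;
    import java.util.Collection;
    import java.util.Map;

    /** Sketch: returns the configuration unchanged; a real decorator rewrites it against the actual data sources. */
    public final class PassThroughRuleConfigurationDecorator implements RuleConfigurationDecorator<RuleConfiguration> {
        
        @Override
        public RuleConfiguration decorate(final String databaseName, final Map<String, DataSource> dataSources,
                                          final Collection<ShardingSphereRule> builtRules, final RuleConfiguration ruleConfig) {
            return ruleConfig;
        }
        
        @Override
        public String getType() {
            // Decorators are registered by rule configuration class name; see the META-INF/services file added below.
            return RuleConfiguration.class.getName();
        }
    }
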
diff --git a/infra/common/src/main/java/org/apache/shardingsphere/infra/datanode/DataNode.java b/infra/common/src/main/java/org/apache/shardingsphere/infra/datanode/DataNode.java
index 319068d3f72..e11dbe64148 100644
--- a/infra/common/src/main/java/org/apache/shardingsphere/infra/datanode/DataNode.java
+++ b/infra/common/src/main/java/org/apache/shardingsphere/infra/datanode/DataNode.java
@@ -41,6 +41,8 @@ public final class DataNode {
     
     private static final String DELIMITER = ".";
     
+    private static final String ASTERISK = "*";
+    
     private final String dataSourceName;
     
     private final String tableName;
@@ -78,20 +80,19 @@ public final class DataNode {
         ShardingSpherePreconditions.checkState(dataNode.contains(DELIMITER),
                 () -> new InvalidDataNodesFormatException(dataNode, String.format("Invalid format for data node `%s`", dataNode)));
         boolean containsSchema = isValidDataNode(dataNode, 3);
-        if (containsSchema) {
-            ShardingSpherePreconditions.checkState(databaseType instanceof SchemaSupportedDatabaseType,
-                    () -> new InvalidDataNodesFormatException(dataNode,
-                            String.format("Current database type `%s` does not support schema, please use format `db.table`", databaseType.getType())));
-        } else {
-            ShardingSpherePreconditions.checkState(!(databaseType instanceof SchemaSupportedDatabaseType),
-                    () -> new InvalidDataNodesFormatException(dataNode, String.format("Current database type `%s` is schema required, please use format `db.schema.table`", databaseType.getType())));
-        }
         List<String> segments = Splitter.on(DELIMITER).splitToList(dataNode);
         dataSourceName = segments.get(0);
-        schemaName = containsSchema ? segments.get(1) : databaseName;
+        schemaName = getSchemaName(databaseName, databaseType, containsSchema, segments);
         tableName = containsSchema ? segments.get(2).toLowerCase() : segments.get(1).toLowerCase();
     }
     
+    private String getSchemaName(final String databaseName, final DatabaseType databaseType, final boolean containsSchema, final List<String> segments) {
+        if (databaseType instanceof SchemaSupportedDatabaseType) {
+            return containsSchema ? segments.get(1) : ASTERISK;
+        }
+        return databaseName;
+    }
+    
     private boolean isValidDataNode(final String dataNodeStr, final Integer tier) {
         return dataNodeStr.contains(DELIMITER) && tier == Splitter.on(DELIMITER).omitEmptyStrings().splitToList(dataNodeStr).size();
     }
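
The constructor change above relaxes the old hard failure that tied the `db.table` and `db.schema.table` formats to the database type: for schema-aware databases (PostgreSQL, openGauss) a two-segment data node is now accepted and its schema recorded as the `*` placeholder instead of being rejected. A standalone sketch of the parsing rule, using the same Guava Splitter the class uses (data source, schema and table names are illustrative):

    import com.google.common.base.Splitter;

    import java.util.List;

    public final class DataNodeSchemaSketch {
        
        public static void main(final String[] args) {
            System.out.println(resolveSchemaName("ds_0.public.t_order")); // public
            System.out.println(resolveSchemaName("ds_0.t_order"));        // * (placeholder)
        }
        
        // Mirrors getSchemaName(...) above for a schema-aware database type.
        private static String resolveSchemaName(final String dataNode) {
            List<String> segments = Splitter.on(".").splitToList(dataNode);
            return 3 == segments.size() ? segments.get(1) : "*";
        }
    }
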
diff --git a/jdbc/core/src/main/java/org/apache/shardingsphere/driver/data/pipeline/datasource/creator/ShardingSpherePipelineDataSourceCreator.java b/jdbc/core/src/main/java/org/apache/shardingsphere/driver/data/pipeline/datasource/creator/ShardingSpherePipelineDataSourceCreator.java
index 7c5412ba742..8705a49c477 100644
--- a/jdbc/core/src/main/java/org/apache/shardingsphere/driver/data/pipeline/datasource/creator/ShardingSpherePipelineDataSourceCreator.java
+++ b/jdbc/core/src/main/java/org/apache/shardingsphere/driver/data/pipeline/datasource/creator/ShardingSpherePipelineDataSourceCreator.java
@@ -26,9 +26,12 @@ import org.apache.shardingsphere.infra.yaml.config.pojo.YamlRootConfiguration;
 import org.apache.shardingsphere.infra.yaml.config.pojo.algorithm.YamlAlgorithmConfiguration;
 import org.apache.shardingsphere.sharding.yaml.config.YamlShardingRuleConfiguration;
 import org.apache.shardingsphere.sharding.yaml.swapper.ShardingRuleConfigurationConverter;
+import org.apache.shardingsphere.single.api.constant.SingleTableConstants;
+import org.apache.shardingsphere.single.yaml.config.pojo.YamlSingleRuleConfiguration;
 
 import javax.sql.DataSource;
 import java.sql.SQLException;
+import java.util.Collections;
 import java.util.Optional;
 
 /**
@@ -47,9 +50,17 @@ public final class ShardingSpherePipelineDataSourceCreator implements PipelineDa
         }
         rootConfig.setDatabaseName(rootConfig.getDatabaseName());
         rootConfig.setSchemaName(rootConfig.getSchemaName());
+        updateSingleRuleConfiguration(rootConfig);
         return YamlShardingSphereDataSourceFactory.createDataSourceWithoutCache(rootConfig);
     }
     
+    private void updateSingleRuleConfiguration(final YamlRootConfiguration rootConfig) {
+        rootConfig.getRules().removeIf(each -> each instanceof YamlSingleRuleConfiguration);
+        YamlSingleRuleConfiguration singleRuleConfig = new YamlSingleRuleConfiguration();
+        singleRuleConfig.setTables(Collections.singletonList(SingleTableConstants.ALL_TABLES));
+        rootConfig.getRules().add(singleRuleConfig);
+    }
+    
     // TODO Another way is improving ExecuteQueryCallback.executeSQL to enable streaming query, then remove it
     private void enableStreamingQuery(final YamlRootConfiguration rootConfig) {
         // Set a large enough value to enable ConnectionMode.MEMORY_STRICTLY, make sure streaming query work.
diff --git a/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/MetaDataPersistService.java b/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/MetaDataPersistService.java
index b371c101585..3b5566aab8a 100644
--- a/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/MetaDataPersistService.java
+++ b/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/MetaDataPersistService.java
@@ -20,11 +20,13 @@ package org.apache.shardingsphere.metadata.persist;
 import lombok.Getter;
 import org.apache.shardingsphere.infra.config.database.DatabaseConfiguration;
 import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
+import org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator;
 import org.apache.shardingsphere.infra.datasource.pool.creator.DataSourcePoolCreator;
 import org.apache.shardingsphere.infra.datasource.pool.destroyer.DataSourcePoolDestroyer;
 import org.apache.shardingsphere.infra.datasource.props.DataSourceProperties;
 import org.apache.shardingsphere.infra.datasource.props.DataSourcePropertiesCreator;
 import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
+import org.apache.shardingsphere.infra.util.spi.type.typed.TypedSPILoader;
 import org.apache.shardingsphere.metadata.persist.data.ShardingSphereDataPersistService;
 import org.apache.shardingsphere.metadata.persist.service.config.database.DataSourcePersistService;
 import org.apache.shardingsphere.metadata.persist.service.config.database.DatabaseRulePersistService;
@@ -37,6 +39,7 @@ import org.apache.shardingsphere.mode.spi.PersistRepository;
 import javax.sql.DataSource;
 import java.util.Collection;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
@@ -88,10 +91,24 @@ public final class MetaDataPersistService implements MetaDataBasedPersistService
             databaseMetaDataService.addDatabase(databaseName);
         } else {
             dataSourceService.persist(databaseName, getDataSourcePropertiesMap(databaseConfigs.getDataSources()));
-            databaseRulePersistService.persist(databaseName, databaseConfigs.getRuleConfigurations());
+            databaseRulePersistService.persist(databaseName, decorateRuleConfigs(databaseName, dataSources, rules));
         }
     }
     
+    @SuppressWarnings("unchecked")
+    private Collection<RuleConfiguration> decorateRuleConfigs(final String databaseName, final Map<String, DataSource> dataSources, final Collection<ShardingSphereRule> rules) {
+        Collection<RuleConfiguration> result = new LinkedList<>();
+        for (ShardingSphereRule each : rules) {
+            RuleConfiguration ruleConfig = each.getConfiguration();
+            if (TypedSPILoader.contains(RuleConfigurationDecorator.class, ruleConfig.getClass().getName())) {
+                result.add(TypedSPILoader.getService(RuleConfigurationDecorator.class, ruleConfig.getClass().getName()).decorate(databaseName, dataSources, rules, ruleConfig));
+            } else {
+                result.add(each.getConfiguration());
+            }
+        }
+        return result;
+    }
+    
     private Map<String, DataSourceProperties> getDataSourcePropertiesMap(final Map<String, DataSource> dataSourceMap) {
         Map<String, DataSourceProperties> result = new LinkedHashMap<>(dataSourceMap.size(), 1F);
         for (Entry<String, DataSource> entry : dataSourceMap.entrySet()) {
diff --git a/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/NewMetaDataPersistService.java b/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/NewMetaDataPersistService.java
index 0d1a412e5fd..7900ec146e4 100644
--- a/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/NewMetaDataPersistService.java
+++ b/kernel/metadata/core/src/main/java/org/apache/shardingsphere/metadata/persist/NewMetaDataPersistService.java
@@ -20,11 +20,13 @@ package org.apache.shardingsphere.metadata.persist;
 import lombok.Getter;
 import org.apache.shardingsphere.infra.config.database.DatabaseConfiguration;
 import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
+import org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator;
 import org.apache.shardingsphere.infra.datasource.pool.creator.DataSourcePoolCreator;
 import org.apache.shardingsphere.infra.datasource.pool.destroyer.DataSourcePoolDestroyer;
 import org.apache.shardingsphere.infra.datasource.props.DataSourceProperties;
 import org.apache.shardingsphere.infra.datasource.props.DataSourcePropertiesCreator;
 import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
+import org.apache.shardingsphere.infra.util.spi.type.typed.TypedSPILoader;
 import org.apache.shardingsphere.metadata.persist.data.ShardingSphereDataPersistService;
 import org.apache.shardingsphere.metadata.persist.service.config.database.NewDataSourcePersistService;
 import org.apache.shardingsphere.metadata.persist.service.config.database.NewDatabaseRulePersistService;
@@ -37,6 +39,7 @@ import org.apache.shardingsphere.mode.spi.PersistRepository;
 import javax.sql.DataSource;
 import java.util.Collection;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
@@ -95,10 +98,24 @@ public final class NewMetaDataPersistService implements MetaDataBasedPersistServ
             databaseMetaDataService.addDatabase(databaseName);
         } else {
             dataSourceService.persist(databaseName, getDataSourcePropertiesMap(databaseConfigs.getDataSources()));
-            databaseRulePersistService.persist(databaseName, databaseConfigs.getRuleConfigurations());
+            databaseRulePersistService.persist(databaseName, decorateRuleConfigs(databaseName, dataSources, rules));
         }
     }
     
+    @SuppressWarnings("unchecked")
+    private Collection<RuleConfiguration> decorateRuleConfigs(final String databaseName, final Map<String, DataSource> dataSources, final Collection<ShardingSphereRule> rules) {
+        Collection<RuleConfiguration> result = new LinkedList<>();
+        for (ShardingSphereRule each : rules) {
+            RuleConfiguration ruleConfig = each.getConfiguration();
+            if (TypedSPILoader.contains(RuleConfigurationDecorator.class, ruleConfig.getClass().getName())) {
+                result.add(TypedSPILoader.getService(RuleConfigurationDecorator.class, ruleConfig.getClass().getName()).decorate(databaseName, dataSources, rules, ruleConfig));
+            } else {
+                result.add(each.getConfiguration());
+            }
+        }
+        return result;
+    }
+    
     private Map<String, DataSourceProperties> getDataSourcePropertiesMap(final Map<String, DataSource> dataSourceMap) {
         Map<String, DataSourceProperties> result = new LinkedHashMap<>(dataSourceMap.size(), 1F);
         for (Entry<String, DataSource> entry : dataSourceMap.entrySet()) {
diff --git a/kernel/single/core/src/main/java/org/apache/shardingsphere/single/decorator/SingleRuleConfigurationDecorator.java b/kernel/single/core/src/main/java/org/apache/shardingsphere/single/decorator/SingleRuleConfigurationDecorator.java
new file mode 100644
index 00000000000..8633bb69d4a
--- /dev/null
+++ b/kernel/single/core/src/main/java/org/apache/shardingsphere/single/decorator/SingleRuleConfigurationDecorator.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.shardingsphere.single.decorator;
+
+import org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator;
+import org.apache.shardingsphere.infra.database.type.DatabaseType;
+import org.apache.shardingsphere.infra.database.type.DatabaseTypeEngine;
+import org.apache.shardingsphere.infra.database.type.SchemaSupportedDatabaseType;
+import org.apache.shardingsphere.infra.datanode.DataNode;
+import org.apache.shardingsphere.infra.datasource.state.DataSourceStateManager;
+import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
+import org.apache.shardingsphere.infra.util.exception.ShardingSpherePreconditions;
+import org.apache.shardingsphere.single.api.config.SingleRuleConfiguration;
+import org.apache.shardingsphere.single.api.constant.SingleTableConstants;
+import org.apache.shardingsphere.single.datanode.SingleTableDataNodeLoader;
+import org.apache.shardingsphere.single.exception.InvalidSingleRuleConfigurationException;
+import org.apache.shardingsphere.single.rule.SingleRule;
+import org.apache.shardingsphere.single.util.SingleTableLoadUtils;
+
+import javax.sql.DataSource;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * Single rule configuration decorator.
+ */
+public final class SingleRuleConfigurationDecorator implements RuleConfigurationDecorator<SingleRuleConfiguration> {
+    
+    @Override
+    public SingleRuleConfiguration decorate(final String databaseName, final Map<String, DataSource> dataSources,
+                                            final Collection<ShardingSphereRule> builtRules, final SingleRuleConfiguration ruleConfig) {
+        SingleRuleConfiguration result = new SingleRuleConfiguration();
+        result.getTables().addAll(decorateTables(databaseName, dataSources, new LinkedList<>(builtRules), ruleConfig.getTables()));
+        ruleConfig.getDefaultDataSource().ifPresent(result::setDefaultDataSource);
+        return result;
+    }
+    
+    private Collection<String> decorateTables(final String databaseName, final Map<String, DataSource> dataSources, final Collection<ShardingSphereRule> builtRules, final Collection<String> tables) {
+        builtRules.removeIf(SingleRule.class::isInstance);
+        if (tables.isEmpty() && builtRules.isEmpty()) {
+            return Collections.emptyList();
+        }
+        Collection<String> splitTables = SingleTableLoadUtils.splitTableLines(tables);
+        if (!isExpandRequired(splitTables)) {
+            return splitTables;
+        }
+        Map<String, DataSource> enabledDataSources = DataSourceStateManager.getInstance().getEnabledDataSourceMap(databaseName, dataSources);
+        Map<String, DataSource> aggregatedDataSources = SingleTableLoadUtils.getAggregatedDataSourceMap(enabledDataSources, builtRules);
+        DatabaseType databaseType = DatabaseTypeEngine.getStorageType(enabledDataSources.values());
+        Collection<String> excludedTables = SingleTableLoadUtils.getExcludedTables(builtRules);
+        Map<String, Collection<DataNode>> actualDataNodes = SingleTableDataNodeLoader.load(databaseName, databaseType, aggregatedDataSources, excludedTables);
+        Collection<DataNode> configuredDataNodes = SingleTableLoadUtils.convertToDataNodes(databaseName, databaseType, splitTables);
+        checkRuleConfiguration(databaseName, aggregatedDataSources, excludedTables, configuredDataNodes);
+        boolean isSchemaSupportedDatabaseType = databaseType instanceof SchemaSupportedDatabaseType;
+        if (splitTables.contains(SingleTableConstants.ALL_TABLES) || splitTables.contains(SingleTableConstants.ALL_SCHEMA_TABLES)) {
+            return loadAllTables(isSchemaSupportedDatabaseType, actualDataNodes);
+        }
+        return loadSpecifiedTables(isSchemaSupportedDatabaseType, actualDataNodes, builtRules, configuredDataNodes);
+    }
+    
+    private boolean isExpandRequired(final Collection<String> splitTables) {
+        return splitTables.stream().anyMatch(each -> each.contains(SingleTableConstants.ASTERISK));
+    }
+    
+    private Collection<String> loadSpecifiedTables(final boolean isSchemaSupportedDatabaseType, final Map<String, Collection<DataNode>> actualDataNodes,
+                                                   final Collection<ShardingSphereRule> builtRules, final Collection<DataNode> configuredDataNodes) {
+        Collection<String> expandRequiredDataSources = new LinkedHashSet<>();
+        Map<String, DataNode> expectedDataNodes = new LinkedHashMap<>();
+        for (DataNode each : configuredDataNodes) {
+            if (SingleTableConstants.ASTERISK.equals(each.getTableName())) {
+                expandRequiredDataSources.add(each.getDataSourceName());
+            } else {
+                expectedDataNodes.put(each.getTableName(), each);
+            }
+        }
+        if (expandRequiredDataSources.isEmpty()) {
+            return loadSpecifiedTablesWithoutExpand(isSchemaSupportedDatabaseType, actualDataNodes, configuredDataNodes);
+        }
+        Collection<String> featureRequiredSingleTables = SingleTableLoadUtils.getFeatureRequiredSingleTables(builtRules);
+        return loadSpecifiedTablesWithExpand(isSchemaSupportedDatabaseType, actualDataNodes, featureRequiredSingleTables, expandRequiredDataSources, expectedDataNodes);
+    }
+    
+    private Collection<String> loadSpecifiedTablesWithExpand(final boolean isSchemaSupportedDatabaseType, final Map<String, Collection<DataNode>> actualDataNodes,
+                                                             final Collection<String> featureRequiredSingleTables, final Collection<String> expandRequiredDataSources,
+                                                             final Map<String, DataNode> expectedDataNodes) {
+        Collection<String> result = new LinkedHashSet<>();
+        for (Entry<String, Collection<DataNode>> entry : actualDataNodes.entrySet()) {
+            if (featureRequiredSingleTables.contains(entry.getKey())) {
+                continue;
+            }
+            DataNode physicalDataNode = entry.getValue().iterator().next();
+            if (expandRequiredDataSources.contains(physicalDataNode.getDataSourceName())) {
+                result.add(getTableNodeString(isSchemaSupportedDatabaseType, physicalDataNode));
+                continue;
+            }
+            if (expectedDataNodes.containsKey(entry.getKey())) {
+                DataNode dataNode = expectedDataNodes.get(entry.getKey());
+                String tableNodeStr = getTableNodeString(isSchemaSupportedDatabaseType, physicalDataNode);
+                ShardingSpherePreconditions.checkState(physicalDataNode.equals(dataNode),
+                        () -> new InvalidSingleRuleConfigurationException(String.format("Single table `%s` is found that does not match %s", tableNodeStr,
+                                getTableNodeString(isSchemaSupportedDatabaseType, dataNode))));
+                result.add(tableNodeStr);
+            }
+        }
+        return result;
+    }
+    
+    private Collection<String> loadSpecifiedTablesWithoutExpand(final boolean isSchemaSupportedDatabaseType, final Map<String, Collection<DataNode>> actualDataNodes,
+                                                                final Collection<DataNode> configuredDataNodes) {
+        Collection<String> result = new LinkedHashSet<>();
+        for (DataNode each : configuredDataNodes) {
+            ShardingSpherePreconditions.checkState(actualDataNodes.containsKey(each.getTableName()),
+                    () -> new InvalidSingleRuleConfigurationException(String.format("Single table `%s` does not exist", getTableNodeString(isSchemaSupportedDatabaseType, each))));
+            DataNode actualDataNode = actualDataNodes.get(each.getTableName()).iterator().next();
+            String tableNodeStr = getTableNodeString(isSchemaSupportedDatabaseType, actualDataNode);
+            ShardingSpherePreconditions.checkState(actualDataNode.equals(each),
+                    () -> new InvalidSingleRuleConfigurationException(String.format("Single table `%s` is found that does not match %s", tableNodeStr,
+                            getTableNodeString(isSchemaSupportedDatabaseType, each))));
+            result.add(tableNodeStr);
+        }
+        return result;
+    }
+    
+    private Collection<String> loadAllTables(final boolean isSchemaSupportedDatabaseType, final Map<String, Collection<DataNode>> actualDataNodes) {
+        Collection<String> result = new LinkedList<>();
+        for (Entry<String, Collection<DataNode>> entry : actualDataNodes.entrySet()) {
+            result.add(getTableNodeString(isSchemaSupportedDatabaseType, entry.getValue().iterator().next()));
+        }
+        return result;
+    }
+    
+    private String getTableNodeString(final boolean isSchemaSupportedDatabaseType, final DataNode dataNode) {
+        return isSchemaSupportedDatabaseType
+                ? formatTableName(dataNode.getDataSourceName(), dataNode.getSchemaName(), dataNode.getTableName())
+                : formatTableName(dataNode.getDataSourceName(), dataNode.getTableName());
+    }
+    
+    private void checkRuleConfiguration(final String databaseName, final Map<String, DataSource> dataSources, final Collection<String> excludedTables, final Collection<DataNode> dataNodes) {
+        for (DataNode each : dataNodes) {
+            if (!SingleTableConstants.ASTERISK.equals(each.getDataSourceName())) {
+                ShardingSpherePreconditions.checkState(dataSources.containsKey(each.getDataSourceName()),
+                        () -> new InvalidSingleRuleConfigurationException(String.format("Data source `%s` does not exist in database `%s`", each.getDataSourceName(), databaseName)));
+            }
+            ShardingSpherePreconditions.checkState(!excludedTables.contains(each.getTableName()),
+                    () -> new InvalidSingleRuleConfigurationException(String.format("Table `%s` existed and is not a single table in database `%s`",
+                            each.getTableName(), databaseName)));
+        }
+    }
+    
+    private String formatTableName(final String dataSourceName, final String tableName) {
+        return String.format("%s.%s", dataSourceName, tableName);
+    }
+    
+    private String formatTableName(final String dataSourceName, final String schemaName, final String tableName) {
+        return String.format("%s.%s.%s", dataSourceName, schemaName, tableName);
+    }
+    
+    @Override
+    public String getType() {
+        return SingleRuleConfiguration.class.getName();
+    }
+}
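
The decorator above is what expands wildcard entries such as "*.*" (all tables in every storage unit), "*.*.*" (all tables in every schema, for schema-aware databases) or "ds_0.*" (all single tables of one storage unit) into concrete data nodes before the configuration is persisted. A rough usage sketch follows; storage unit and table names are illustrative, and in the commit itself the call goes through the RuleConfigurationDecorator SPI rather than direct instantiation:

    private SingleRuleConfiguration expandWildcards(final String databaseName, final Map<String, DataSource> dataSources,
                                                    final Collection<ShardingSphereRule> builtRules) {
        SingleRuleConfiguration configured = new SingleRuleConfiguration();
        configured.getTables().add("ds_0.*");
        // For a database type without schemas, the returned configuration holds concrete nodes
        // such as "ds_0.t_user" and "ds_0.t_address": every single table actually present in ds_0.
        return new SingleRuleConfigurationDecorator().decorate(databaseName, dataSources, builtRules, configured);
    }
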
diff --git a/kernel/single/core/src/main/resources/META-INF/services/org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator b/kernel/single/core/src/main/resources/META-INF/services/org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator
new file mode 100644
index 00000000000..7674d117a38
--- /dev/null
+++ b/kernel/single/core/src/main/resources/META-INF/services/org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+org.apache.shardingsphere.single.decorator.SingleRuleConfigurationDecorator
diff --git a/kernel/single/distsql/handler/src/main/java/org/apache/shardingsphere/single/distsql/handler/update/LoadSingleTableStatementUpdater.java b/kernel/single/distsql/handler/src/main/java/org/apache/shardingsphere/single/distsql/handler/update/LoadSingleTableStatementUpdater.java
index 3c9a1f83ab7..23df71210b5 100644
--- a/kernel/single/distsql/handler/src/main/java/org/apache/shardingsphere/single/distsql/handler/update/LoadSingleTableStatementUpdater.java
+++ b/kernel/single/distsql/handler/src/main/java/org/apache/shardingsphere/single/distsql/handler/update/LoadSingleTableStatementUpdater.java
@@ -17,7 +17,6 @@
 
 package org.apache.shardingsphere.single.distsql.handler.update;
 
-import org.apache.shardingsphere.dialect.exception.syntax.table.TableExistsException;
 import org.apache.shardingsphere.distsql.handler.exception.storageunit.MissingRequiredStorageUnitsException;
 import org.apache.shardingsphere.distsql.handler.update.RuleDefinitionCreateUpdater;
 import org.apache.shardingsphere.infra.database.type.DatabaseTypeEngine;
@@ -35,13 +34,16 @@ import org.apache.shardingsphere.single.distsql.handler.exception.MissingRequire
 import org.apache.shardingsphere.single.distsql.segment.SingleTableSegment;
 import org.apache.shardingsphere.single.distsql.statement.rdl.LoadSingleTableStatement;
 import org.apache.shardingsphere.single.exception.InvalidSingleRuleConfigurationException;
+import org.apache.shardingsphere.single.rule.SingleRule;
 import org.apache.shardingsphere.single.util.SingleTableLoadUtils;
 
 import javax.sql.DataSource;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.Map;
+import java.util.Optional;
 import java.util.stream.Collectors;
 
 /**
@@ -58,27 +60,22 @@ public final class LoadSingleTableStatementUpdater implements RuleDefinitionCrea
     }
     
     private void checkTables(final ShardingSphereDatabase database, final LoadSingleTableStatement sqlStatement, final SingleRuleConfiguration currentRuleConfig, final String defaultSchemaName) {
+        Optional<SingleRule> currentSingleRule = database.getRuleMetaData().findSingleRule(SingleRule.class);
+        Collection<String> currentSingleTables = currentSingleRule.isPresent() ? currentSingleRule.get().getSingleTableDataNodes().keySet() : Collections.emptyList();
         Collection<SingleTableSegment> tableSegments = sqlStatement.getTables();
         boolean isSchemaSupportedDatabaseType = database.getProtocolType() instanceof SchemaSupportedDatabaseType;
         ShardingSphereSchema schema = database.getSchema(defaultSchemaName);
         for (SingleTableSegment each : tableSegments) {
-            checkTableRuleExist(currentRuleConfig, each);
             checkDatabaseTypeAndTableNodeStyle(isSchemaSupportedDatabaseType, each);
             if (SingleTableConstants.ASTERISK.equals(each.getTableName())) {
                 continue;
             }
-            ShardingSpherePreconditions.checkState(!schema.containsTable(each.getTableName()), () -> new TableExistsException(each.getTableName()));
+            boolean isNotSingleTable = schema.containsTable(each.getTableName()) && !currentSingleTables.contains(each.getTableName());
+            ShardingSpherePreconditions.checkState(isNotSingleTable, () -> new InvalidSingleRuleConfigurationException(String.format("Table `%s` existed and is not a single table in database `%s`",
+                    each.getTableName(), database.getName())));
         }
     }
     
-    private void checkTableRuleExist(final SingleRuleConfiguration currentRuleConfig, final SingleTableSegment segment) {
-        if (null == currentRuleConfig) {
-            return;
-        }
-        ShardingSpherePreconditions.checkState(!currentRuleConfig.getTables().contains(segment.toString()),
-                () -> new InvalidSingleRuleConfigurationException(String.format("Duplicated table definition `%s`", segment)));
-    }
-    
     private void checkDatabaseTypeAndTableNodeStyle(final boolean isSchemaSupportedDatabaseType, final SingleTableSegment singleTableSegment) {
         if (SingleTableConstants.ALL_TABLES.equals(singleTableSegment.toString()) || SingleTableConstants.ALL_SCHEMA_TABLES.equals(singleTableSegment.toString())) {
             return;
@@ -92,7 +89,10 @@ public final class LoadSingleTableStatementUpdater implements RuleDefinitionCrea
         }
     }
     
-    private Collection<String> getRequiredTables(final LoadSingleTableStatement sqlStatement) {
+    private Collection<String> getRequiredTables(final SingleRuleConfiguration currentRuleConfig, final LoadSingleTableStatement sqlStatement) {
+        if (null != currentRuleConfig) {
+            return sqlStatement.getTables().stream().map(SingleTableSegment::toString).filter(each -> !currentRuleConfig.getTables().contains(each)).collect(Collectors.toSet());
+        }
         return sqlStatement.getTables().stream().map(SingleTableSegment::toString).collect(Collectors.toSet());
     }
     
@@ -146,7 +146,7 @@ public final class LoadSingleTableStatementUpdater implements RuleDefinitionCrea
     @Override
     public SingleRuleConfiguration buildToBeCreatedRuleConfiguration(final SingleRuleConfiguration currentRuleConfig, final LoadSingleTableStatement sqlStatement) {
         SingleRuleConfiguration result = new SingleRuleConfiguration();
-        result.getTables().addAll(getRequiredTables(sqlStatement));
+        result.getTables().addAll(getRequiredTables(currentRuleConfig, sqlStatement));
         return result;
     }
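
With this change, LOAD SINGLE TABLE skips tables that are already present in the current configuration (getRequiredTables filters them out), and its existence check now takes already-loaded single tables into account rather than rejecting any table already present in the schema. The DistSQL handled by this updater looks like the sketch below; the wildcard form is also exercised by the RDL E2E case at the end of this patch, and the storage unit and table names here are illustrative:

    // DistSQL statements routed to LoadSingleTableStatementUpdater, shown as plain strings:
    String loadAllSingleTables = "LOAD SINGLE TABLE *.*";         // every single table from every storage unit
    String loadOneSingleTable  = "LOAD SINGLE TABLE ds_0.t_user"; // one table from one storage unit
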
     
diff --git a/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/database/DropDatabaseBackendHandler.java b/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/database/DropDatabaseBackendHandler.java
index 4e9aa3789d9..c780196358e 100644
--- a/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/database/DropDatabaseBackendHandler.java
+++ b/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/database/DropDatabaseBackendHandler.java
@@ -50,7 +50,9 @@ public final class DropDatabaseBackendHandler implements ProxyBackendHandler {
             checkSupportedDropCurrentDatabase(connectionSession);
             connectionSession.setCurrentDatabase(null);
         }
-        ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager().dropDatabase(sqlStatement.getDatabaseName());
+        if (ProxyContext.getInstance().databaseExists(sqlStatement.getDatabaseName())) {
+            ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager().dropDatabase(sqlStatement.getDatabaseName());
+        }
         return new UpdateResponseHeader(sqlStatement);
     }
     
diff --git a/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/NewRuleDefinitionBackendHandler.java b/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/NewRuleDefinitionBackendHandler.java
index f7adaa5ac5c..3ab8876e73d 100644
--- a/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/NewRuleDefinitionBackendHandler.java
+++ b/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/NewRuleDefinitionBackendHandler.java
@@ -23,6 +23,7 @@ import org.apache.shardingsphere.distsql.handler.update.RuleDefinitionDropUpdate
 import org.apache.shardingsphere.distsql.handler.update.RuleDefinitionUpdater;
 import org.apache.shardingsphere.distsql.parser.statement.rdl.RuleDefinitionStatement;
 import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
+import org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator;
 import org.apache.shardingsphere.infra.metadata.database.ShardingSphereDatabase;
 import org.apache.shardingsphere.infra.metadata.version.MetaDataVersion;
 import org.apache.shardingsphere.infra.rule.identifier.type.StaticDataSourceContainedRule;
@@ -104,7 +105,7 @@ public final class NewRuleDefinitionBackendHandler<T extends RuleDefinitionState
         }
         if (sqlStatement instanceof LoadSingleTableStatement || sqlStatement instanceof SetDefaultSingleTableStorageUnitStatement) {
             return ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager().alterRuleConfiguration(database.getName(),
-                    null == currentRuleConfig ? toBeCreatedRuleConfig : currentRuleConfig);
+                    null == currentRuleConfig ? decorateRuleConfiguration(database, toBeCreatedRuleConfig) : decorateRuleConfiguration(database, currentRuleConfig));
         }
         return ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager().alterRuleConfiguration(database.getName(), toBeCreatedRuleConfig);
     }
@@ -114,13 +115,23 @@ public final class NewRuleDefinitionBackendHandler<T extends RuleDefinitionState
         RuleConfiguration toBeAlteredRuleConfig = updater.buildToBeAlteredRuleConfiguration(sqlStatement);
         updater.updateCurrentRuleConfiguration(currentRuleConfig, toBeAlteredRuleConfig);
         if (sqlStatement instanceof UnloadSingleTableStatement) {
-            return ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager().alterRuleConfiguration(database.getName(), currentRuleConfig);
+            return ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager()
+                    .alterRuleConfiguration(database.getName(), decorateRuleConfiguration(database, currentRuleConfig));
         }
         RuleConfiguration toBeDroppedRuleConfig = updater.buildToBeDroppedRuleConfiguration(currentRuleConfig, toBeAlteredRuleConfig);
         ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager().removeRuleConfiguration(database.getName(), toBeDroppedRuleConfig);
         return ProxyContext.getInstance().getContextManager().getInstanceContext().getModeContextManager().alterRuleConfiguration(database.getName(), toBeAlteredRuleConfig);
     }
     
+    @SuppressWarnings("unchecked")
+    private RuleConfiguration decorateRuleConfiguration(final ShardingSphereDatabase database, final RuleConfiguration ruleConfig) {
+        if (TypedSPILoader.contains(RuleConfigurationDecorator.class, ruleConfig.getClass().getName())) {
+            return TypedSPILoader.getService(RuleConfigurationDecorator.class, ruleConfig.getClass().getName()).decorate(database.getName(),
+                    database.getResourceMetaData().getDataSources(), database.getRuleMetaData().getRules(), ruleConfig);
+        }
+        return ruleConfig;
+    }
+    
     @SuppressWarnings({"rawtypes", "unchecked"})
     private Collection<MetaDataVersion> processDrop(final ShardingSphereDatabase database, final T sqlStatement, final RuleDefinitionDropUpdater updater, final RuleConfiguration currentRuleConfig) {
         if (!updater.hasAnyOneToBeDropped(sqlStatement, currentRuleConfig)) {
diff --git a/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/RuleDefinitionBackendHandler.java b/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/RuleDefinitionBackendHandler.java
index c4a925a0480..35c491077a5 100644
--- a/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/RuleDefinitionBackendHandler.java
+++ b/proxy/backend/core/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/RuleDefinitionBackendHandler.java
@@ -23,6 +23,7 @@ import org.apache.shardingsphere.distsql.handler.update.RuleDefinitionDropUpdate
 import org.apache.shardingsphere.distsql.handler.update.RuleDefinitionUpdater;
 import org.apache.shardingsphere.distsql.parser.statement.rdl.RuleDefinitionStatement;
 import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
+import org.apache.shardingsphere.infra.config.rule.decorator.RuleConfigurationDecorator;
 import org.apache.shardingsphere.infra.metadata.database.ShardingSphereDatabase;
 import org.apache.shardingsphere.infra.rule.identifier.type.StaticDataSourceContainedRule;
 import org.apache.shardingsphere.infra.util.exception.external.sql.type.generic.UnsupportedSQLOperationException;
@@ -83,10 +84,12 @@ public final class RuleDefinitionBackendHandler<T extends RuleDefinitionStatemen
             if (null != currentRuleConfig) {
                 result.remove(currentRuleConfig);
             }
-            result.add(processCreate(sqlStatement, (RuleDefinitionCreateUpdater) updater, currentRuleConfig));
+            RuleConfiguration createdRuleConfig = processCreate(sqlStatement, (RuleDefinitionCreateUpdater) updater, currentRuleConfig);
+            result.add(decorateRuleConfiguration(database, createdRuleConfig));
         } else if (updater instanceof RuleDefinitionAlterUpdater) {
             result.remove(currentRuleConfig);
-            result.add(processAlter(sqlStatement, (RuleDefinitionAlterUpdater) updater, currentRuleConfig));
+            RuleConfiguration alteredRuleConfig = processAlter(sqlStatement, (RuleDefinitionAlterUpdater) updater, currentRuleConfig);
+            result.add(decorateRuleConfiguration(database, alteredRuleConfig));
         } else if (updater instanceof RuleDefinitionDropUpdater) {
             processDrop(database, result, sqlStatement, (RuleDefinitionDropUpdater) updater, currentRuleConfig);
         } else {
@@ -95,6 +98,15 @@ public final class RuleDefinitionBackendHandler<T extends RuleDefinitionStatemen
         return result;
     }
     
+    @SuppressWarnings("unchecked")
+    private RuleConfiguration decorateRuleConfiguration(final ShardingSphereDatabase database, final RuleConfiguration ruleConfig) {
+        if (TypedSPILoader.contains(RuleConfigurationDecorator.class, ruleConfig.getClass().getName())) {
+            return TypedSPILoader.getService(RuleConfigurationDecorator.class, ruleConfig.getClass().getName()).decorate(database.getName(),
+                    database.getResourceMetaData().getDataSources(), database.getRuleMetaData().getRules(), ruleConfig);
+        }
+        return ruleConfig;
+    }
+    
     @SuppressWarnings({"rawtypes", "unchecked"})
     private RuleConfiguration processCreate(final T sqlStatement, final RuleDefinitionCreateUpdater updater, final RuleConfiguration currentRuleConfig) {
         RuleConfiguration toBeCreatedRuleConfig = updater.buildToBeCreatedRuleConfiguration(currentRuleConfig, sqlStatement);
diff --git a/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/MySQLTimeTypesMigrationE2EIT.java b/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/MySQLTimeTypesMigrationE2EIT.java
index 1c36afa0a43..90cfc547f40 100644
--- a/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/MySQLTimeTypesMigrationE2EIT.java
+++ b/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/MySQLTimeTypesMigrationE2EIT.java
@@ -49,7 +49,6 @@ class MySQLTimeTypesMigrationE2EIT extends AbstractMigrationE2EIT {
                     + "`t_year` year DEFAULT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB;";
             containerComposer.sourceExecuteWithLog(sql);
             insertOneRecordWithZeroValue(containerComposer, 1);
-            loadAllSingleTables(containerComposer);
             addMigrationSourceResource(containerComposer);
             addMigrationTargetResource(containerComposer);
             startMigration(containerComposer, "time_e2e", "time_e2e");
@@ -57,6 +56,7 @@ class MySQLTimeTypesMigrationE2EIT extends AbstractMigrationE2EIT {
             containerComposer.waitJobPrepareSuccess(String.format("SHOW MIGRATION STATUS '%s'", jobId));
             insertOneRecordWithZeroValue(containerComposer, 2);
             containerComposer.waitIncrementTaskFinished(String.format("SHOW MIGRATION STATUS '%s'", jobId));
+            loadAllSingleTables(containerComposer);
             assertCheckMigrationSuccess(containerComposer, jobId, "DATA_MATCH");
         }
     }
diff --git a/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/RulesMigrationE2EIT.java b/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/RulesMigrationE2EIT.java
index cd85a19432d..9234d0bf518 100644
--- a/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/RulesMigrationE2EIT.java
+++ b/test/e2e/operation/pipeline/src/test/java/org/apache/shardingsphere/test/e2e/data/pipeline/cases/migration/general/RulesMigrationE2EIT.java
@@ -55,7 +55,6 @@ class RulesMigrationE2EIT extends AbstractMigrationE2EIT {
     @ArgumentsSource(PipelineE2ETestCaseArgumentsProvider.class)
     void assertNoRuleMigrationSuccess(final PipelineTestParameter testParam) throws Exception {
         try (PipelineContainerComposer containerComposer = new PipelineContainerComposer(testParam, new MigrationJobType())) {
-            loadAllSingleTables(containerComposer);
             assertMigrationSuccess(containerComposer, null);
         }
     }
@@ -86,6 +85,7 @@ class RulesMigrationE2EIT extends AbstractMigrationE2EIT {
         String jobId = listJobId(containerComposer).get(0);
         containerComposer.waitJobPrepareSuccess(String.format("SHOW MIGRATION STATUS '%s'", jobId));
         containerComposer.waitIncrementTaskFinished(String.format("SHOW MIGRATION STATUS '%s'", jobId));
+        loadAllSingleTables(containerComposer);
         assertCheckMigrationSuccess(containerComposer, jobId, "DATA_MATCH");
         commitMigrationByJobId(containerComposer, jobId);
         assertThat(containerComposer.getTargetTableRecordsCount(containerComposer.getProxyDataSource(), SOURCE_TABLE_NAME), is(PipelineContainerComposer.TABLE_INIT_ROW_COUNT));
diff --git a/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/opengauss/config-sharding.yaml b/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/opengauss/config-sharding.yaml
index 8053acf6f09..2d9bfdb85ff 100644
--- a/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/opengauss/config-sharding.yaml
+++ b/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/opengauss/config-sharding.yaml
@@ -44,7 +44,7 @@ dataSources:
 rules:
   - !SINGLE
     tables:
-      - "*.*"
+      - "*.*.*"
   - !SHARDING
     tables:
       t_order:
diff --git a/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/postgresql/rule.yaml b/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/postgresql/rule.yaml
index 812a2d97129..88f4ddf5ec7 100644
--- a/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/postgresql/rule.yaml
+++ b/test/e2e/operation/transaction/src/test/resources/env/scenario/cursor/proxy/conf/postgresql/rule.yaml
@@ -25,7 +25,7 @@ databaseName: sharding_db
 rules:
   - !SINGLE
     tables:
-      - "*.*"
+      - "*.*.*"
   - !SHARDING
     tables:
       t_order:
diff --git a/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/mysql/config-sharding.yaml b/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/mysql/config-sharding.yaml
index 92501536514..d0b3609ada5 100644
--- a/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/mysql/config-sharding.yaml
+++ b/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/mysql/config-sharding.yaml
@@ -42,6 +42,9 @@ dataSources:
     maxPoolSize: 2
     minPoolSize: 2
 rules:
+  - !SINGLE
+    tables:
+      - "*.*"
   - !SHARDING
     tables:
       t_order:
diff --git a/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/opengauss/config-sharding.yaml b/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/opengauss/config-sharding.yaml
index 358f23ddb3f..8c3956bf754 100644
--- a/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/opengauss/config-sharding.yaml
+++ b/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/opengauss/config-sharding.yaml
@@ -44,7 +44,7 @@ dataSources:
 rules:
   - !SINGLE
     tables:
-      - "*.*"
+      - "*.*.*"
   - !SHARDING
     tables:
       t_order:
diff --git a/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/postgresql/config-sharding.yaml b/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/postgresql/config-sharding.yaml
index f05c5ac3b78..8ac53f700db 100644
--- a/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/postgresql/config-sharding.yaml
+++ b/test/e2e/operation/transaction/src/test/resources/env/scenario/default/proxy/conf/postgresql/config-sharding.yaml
@@ -42,6 +42,9 @@ dataSources:
     maxPoolSize: 2
     minPoolSize: 2
 rules:
+  - !SINGLE
+    tables:
+      - "*.*.*"
   - !SHARDING
     tables:
       t_order:
diff --git a/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/opengauss/config-readwrite-splitting.yaml b/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/opengauss/config-readwrite-splitting.yaml
index 16d396df7da..9d3151e0e5d 100644
--- a/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/opengauss/config-readwrite-splitting.yaml
+++ b/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/opengauss/config-readwrite-splitting.yaml
@@ -53,7 +53,7 @@ dataSources:
 rules:
   - !SINGLE
     tables:
-      - "*.*"
+      - "*.*.*"
   - !READWRITE_SPLITTING
     dataSources:
       readwrite_ds:
diff --git a/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/postgresql/config-readwrite-splitting.yaml b/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/postgresql/config-readwrite-splitting.yaml
index aa93e197ad9..0b1319f3035 100644
--- a/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/postgresql/config-readwrite-splitting.yaml
+++ b/test/e2e/operation/transaction/src/test/resources/env/scenario/readwrite-splitting/proxy/conf/postgresql/config-readwrite-splitting.yaml
@@ -53,7 +53,7 @@ dataSources:
 rules:
   - !SINGLE
     tables:
-      - "*.*"
+      - "*.*.*"
   - !READWRITE_SPLITTING
     dataSources:
       readwrite_ds:
diff --git a/test/e2e/sql/src/test/resources/cases/rdl/rdl-integration-create.xml b/test/e2e/sql/src/test/resources/cases/rdl/rdl-integration-create.xml
index 02fadb48567..7460c5603f4 100644
--- a/test/e2e/sql/src/test/resources/cases/rdl/rdl-integration-create.xml
+++ b/test/e2e/sql/src/test/resources/cases/rdl/rdl-integration-create.xml
@@ -64,6 +64,7 @@
             <destroy-sql sql="DROP SHARDING TABLE RULE t_order" />
         </assertion>
         <assertion expected-data-file="create_sharding_rules_show_tables.xml">
+            <initial-sql sql="LOAD SINGLE TABLE *.*;" />
             <assertion-sql sql="SHOW TABLES;" />
             <destroy-sql sql="DROP SHARDING TABLE RULE t_order" />
         </assertion>