Posted to notifications@shardingsphere.apache.org by ji...@apache.org on 2022/11/10 04:38:57 UTC

[shardingsphere] branch master updated: Enhanced DistSQL verification of RDL type in storage and readwrite splitting (#22040)

This is an automated email from the ASF dual-hosted git repository.

jianglongtao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/shardingsphere.git


The following commit(s) were added to refs/heads/master by this push:
     new 71f4e14a05b Enhanced DistSQL verification of RDL type in storage and readwrite splitting (#22040)
71f4e14a05b is described below

commit 71f4e14a05b1709edad9a4e1d5bfda683a8de68b
Author: jiangML <10...@qq.com>
AuthorDate: Thu Nov 10 12:38:50 2022 +0800

    Enhanced DistSQL verification of RDL type in storage and readwrite splitting (#22040)
    
    * Enhanced distsql verification of rdl type in storage unit and readwrite-splitting
    
    * change resource to storage unit
    
    * change resource to storage unit
---
 .../ReadwriteSplittingRuleStatementChecker.java    | 149 +++++++++++++++++++--
 ...lterReadwriteSplittingRuleStatementUpdater.java |  77 +----------
 ...eateReadwriteSplittingRuleStatementUpdater.java |  87 +-----------
 ...DropReadwriteSplittingRuleStatementUpdater.java |  15 +--
 .../resource/AlterStorageUnitBackendHandler.java   |  36 ++---
 .../RegisterStorageUnitBackendHandler.java         |  20 ++-
 .../UnregisterStorageUnitBackendHandler.java       |   2 +-
 ...aultSingleTableStorageUnitStatementUpdater.java |   8 +-
 .../RegisterStorageUnitBackendHandlerTest.java     |  45 ++++++-
 9 files changed, 233 insertions(+), 206 deletions(-)
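
For context, the RDL statements covered by this change are the storage unit and readwrite-splitting definition statements. The sketch below illustrates the kind of input the enhanced checkers validate; the DistSQL keywords are assumed from the 5.3.0-era documentation and the unit/rule names are made up for illustration, so treat it as an approximation rather than the canonical syntax of this exact revision.

    -- Register a storage unit (formerly ADD RESOURCE); duplicate names are
    -- rejected by RegisterStorageUnitBackendHandler.
    REGISTER STORAGE UNIT ds_0 (
        URL="jdbc:mysql://127.0.0.1:3306/db0",
        USER="root",
        PASSWORD="root"
    );

    -- Define a readwrite-splitting rule; rule names, referenced storage units and
    -- the load balancer type are validated by ReadwriteSplittingRuleStatementChecker.
    CREATE READWRITE_SPLITTING RULE read_write (
        WRITE_STORAGE_UNIT=ds_0,
        READ_STORAGE_UNITS(ds_1, ds_2),
        TYPE(NAME="random")
    );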

diff --git a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/checker/ReadwriteSplittingRuleStatementChecker.java b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/checker/ReadwriteSplittingRuleStatementChecker.java
index 39d1c84045d..fd4b6fea4e4 100644
--- a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/checker/ReadwriteSplittingRuleStatementChecker.java
+++ b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/checker/ReadwriteSplittingRuleStatementChecker.java
@@ -18,15 +18,30 @@
 package org.apache.shardingsphere.readwritesplitting.distsql.handler.checker;
 
 import com.google.common.base.Strings;
+import org.apache.shardingsphere.infra.distsql.constant.ExportableConstants;
+import org.apache.shardingsphere.infra.distsql.exception.resource.MissingRequiredResourcesException;
+import org.apache.shardingsphere.infra.distsql.exception.rule.DuplicateRuleException;
+import org.apache.shardingsphere.infra.distsql.exception.rule.InvalidAlgorithmConfigurationException;
 import org.apache.shardingsphere.infra.distsql.exception.rule.InvalidRuleConfigurationException;
+import org.apache.shardingsphere.infra.distsql.exception.rule.MissingRequiredRuleException;
+import org.apache.shardingsphere.infra.metadata.database.ShardingSphereDatabase;
+import org.apache.shardingsphere.infra.metadata.database.resource.ShardingSphereResourceMetaData;
+import org.apache.shardingsphere.infra.rule.identifier.type.exportable.ExportableRule;
+import org.apache.shardingsphere.infra.rule.identifier.type.exportable.RuleExportEngine;
 import org.apache.shardingsphere.infra.util.exception.ShardingSpherePreconditions;
 import org.apache.shardingsphere.readwritesplitting.api.ReadwriteSplittingRuleConfiguration;
 import org.apache.shardingsphere.readwritesplitting.api.rule.ReadwriteSplittingDataSourceRuleConfiguration;
 import org.apache.shardingsphere.readwritesplitting.distsql.parser.segment.ReadwriteSplittingRuleSegment;
+import org.apache.shardingsphere.readwritesplitting.factory.ReadQueryLoadBalanceAlgorithmFactory;
 
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
 import java.util.stream.Collectors;
 
 /**
@@ -35,15 +50,125 @@ import java.util.stream.Collectors;
 public final class ReadwriteSplittingRuleStatementChecker {
     
     /**
-     * Check duplicate resource names for readwrite-splitting rule statement.
+     * Check create readwrite splitting rule statement.
+     *
+     * @param database database
+     * @param segments segments
+     * @param currentRuleConfig current rule config
+     */
+    public static void checkCreation(final ShardingSphereDatabase database, final Collection<ReadwriteSplittingRuleSegment> segments, final ReadwriteSplittingRuleConfiguration currentRuleConfig) {
+        String databaseName = database.getName();
+        checkDuplicateRuleNames(databaseName, segments, currentRuleConfig, database.getResourceMetaData());
+        checkResourcesExist(databaseName, segments, database);
+        checkDuplicateResourceNames(databaseName, segments, currentRuleConfig, true);
+        checkLoadBalancers(segments);
+    }
+    
+    /**
+     * Check alter readwrite splitting rule statement.
      * 
-     * @param databaseName database name
+     * @param database database
      * @param segments segments
      * @param currentRuleConfig current rule config
-     * @param isCreating whether is creating
      */
-    public static void checkDuplicateResourceNames(final String databaseName, final Collection<ReadwriteSplittingRuleSegment> segments,
-                                                   final ReadwriteSplittingRuleConfiguration currentRuleConfig, final boolean isCreating) {
+    public static void checkAlteration(final ShardingSphereDatabase database, final Collection<ReadwriteSplittingRuleSegment> segments, final ReadwriteSplittingRuleConfiguration currentRuleConfig) {
+        String databaseName = database.getName();
+        checkRuleConfigurationExist(database, currentRuleConfig);
+        checkDuplicateRuleNamesWithSelf(databaseName, segments);
+        checkRuleNamesExist(segments, currentRuleConfig, databaseName);
+        checkResourcesExist(databaseName, segments, database);
+        checkDuplicateResourceNames(databaseName, segments, currentRuleConfig, false);
+        checkLoadBalancers(segments);
+    }
+    
+    /**
+     * Check current rule configuration exists.
+     * 
+     * @param database database
+     * @param currentRuleConfig current rule config
+     */
+    public static void checkRuleConfigurationExist(final ShardingSphereDatabase database, final ReadwriteSplittingRuleConfiguration currentRuleConfig) {
+        ShardingSpherePreconditions.checkNotNull(currentRuleConfig, () -> new MissingRequiredRuleException("Readwrite splitting", database.getName()));
+    }
+    
+    private static void checkRuleNamesExist(final Collection<ReadwriteSplittingRuleSegment> segments, final ReadwriteSplittingRuleConfiguration currentRuleConfig, final String databaseName) {
+        Collection<String> requiredRuleNames = segments.stream().map(ReadwriteSplittingRuleSegment::getName).collect(Collectors.toList());
+        Collection<String> currentRuleNames = currentRuleConfig.getDataSources().stream().map(ReadwriteSplittingDataSourceRuleConfiguration::getName).collect(Collectors.toList());
+        Collection<String> notExistRuleNames = requiredRuleNames.stream().filter(each -> !currentRuleNames.contains(each)).collect(Collectors.toSet());
+        ShardingSpherePreconditions.checkState(notExistRuleNames.isEmpty(), () -> new MissingRequiredRuleException(databaseName, notExistRuleNames));
+    }
+    
+    private static void checkDuplicateRuleNames(final String databaseName, final Collection<ReadwriteSplittingRuleSegment> segments,
+                                                final ReadwriteSplittingRuleConfiguration currentRuleConfig, final ShardingSphereResourceMetaData resourceMetaData) {
+        checkDuplicateRuleNamesWithSelf(databaseName, segments);
+        checkDuplicateRuleNamesWithResourceMetaData(segments, resourceMetaData);
+        checkDuplicateRuleNamesWithRuleConfiguration(databaseName, segments, currentRuleConfig);
+    }
+    
+    private static void checkDuplicateRuleNamesWithSelf(final String databaseName, final Collection<ReadwriteSplittingRuleSegment> segments) {
+        Collection<String> requiredRuleNames = segments.stream().map(ReadwriteSplittingRuleSegment::getName).collect(Collectors.toList());
+        Collection<String> duplicateRuleNames = getDuplicate(requiredRuleNames);
+        ShardingSpherePreconditions.checkState(duplicateRuleNames.isEmpty(), () -> new DuplicateRuleException("Readwrite splitting", databaseName, duplicateRuleNames));
+    }
+    
+    private static Collection<String> getDuplicate(final Collection<String> require) {
+        return require.stream().collect(Collectors.groupingBy(each -> each, Collectors.counting())).entrySet().stream()
+                .filter(each -> each.getValue() > 1).map(Map.Entry::getKey).collect(Collectors.toSet());
+    }
+    
+    private static void checkDuplicateRuleNamesWithResourceMetaData(final Collection<ReadwriteSplittingRuleSegment> segments, final ShardingSphereResourceMetaData resourceMetaData) {
+        Collection<String> currentRuleNames = new LinkedList<>();
+        if (null != resourceMetaData && null != resourceMetaData.getDataSources()) {
+            currentRuleNames.addAll(resourceMetaData.getDataSources().keySet());
+        }
+        Collection<String> duplicateRuleNames = segments.stream().map(ReadwriteSplittingRuleSegment::getName).filter(currentRuleNames::contains).collect(Collectors.toList());
+        ShardingSpherePreconditions.checkState(duplicateRuleNames.isEmpty(), () -> new InvalidRuleConfigurationException("Readwrite splitting", duplicateRuleNames,
+                Collections.singleton(String.format("%s already exists in storage unit", duplicateRuleNames))));
+    }
+    
+    private static void checkDuplicateRuleNamesWithRuleConfiguration(final String databaseName, final Collection<ReadwriteSplittingRuleSegment> segments,
+                                                                     final ReadwriteSplittingRuleConfiguration currentRuleConfig) {
+        Collection<String> currentRuleNames = new LinkedList<>();
+        if (null != currentRuleConfig) {
+            currentRuleNames.addAll(currentRuleConfig.getDataSources().stream().map(ReadwriteSplittingDataSourceRuleConfiguration::getName).collect(Collectors.toList()));
+        }
+        Collection<String> duplicateRuleNames = segments.stream().map(ReadwriteSplittingRuleSegment::getName).filter(currentRuleNames::contains).collect(Collectors.toList());
+        ShardingSpherePreconditions.checkState(duplicateRuleNames.isEmpty(), () -> new DuplicateRuleException("Readwrite splitting", databaseName, duplicateRuleNames));
+    }
+    
+    private static void checkResourcesExist(final String databaseName, final Collection<ReadwriteSplittingRuleSegment> segments, final ShardingSphereDatabase database) {
+        Collection<String> requireResources = new LinkedHashSet<>();
+        Collection<String> requireDiscoverableResources = new LinkedHashSet<>();
+        segments.forEach(each -> {
+            if (Strings.isNullOrEmpty(each.getAutoAwareResource())) {
+                requireResources.add(each.getWriteDataSource());
+                requireResources.addAll(each.getReadDataSources());
+            } else {
+                requireDiscoverableResources.add(each.getAutoAwareResource());
+            }
+        });
+        Collection<String> notExistResources = database.getResourceMetaData().getNotExistedResources(requireResources);
+        ShardingSpherePreconditions.checkState(notExistResources.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistResources));
+        Collection<String> logicResources = getLogicResources(database);
+        Collection<String> notExistLogicResources = requireDiscoverableResources.stream().filter(each -> !logicResources.contains(each)).collect(Collectors.toSet());
+        ShardingSpherePreconditions.checkState(notExistLogicResources.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistLogicResources));
+    }
+    
+    @SuppressWarnings("unchecked")
+    private static Collection<String> getLogicResources(final ShardingSphereDatabase database) {
+        Collection<String> result = new LinkedHashSet<>();
+        Optional<ExportableRule> exportableRule = database.getRuleMetaData().findRules(ExportableRule.class).stream()
+                .filter(each -> new RuleExportEngine(each).containExportableKey(Collections.singletonList(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES))).findAny();
+        exportableRule.ifPresent(optional -> {
+            Map<String, Object> exportData = new RuleExportEngine(optional).export(Collections.singletonList(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES));
+            Collection<String> logicResources = ((Map<String, String>) exportData.getOrDefault(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES, Collections.emptyMap())).keySet();
+            result.addAll(logicResources);
+        });
+        return result;
+    }
+    
+    private static void checkDuplicateResourceNames(final String databaseName, final Collection<ReadwriteSplittingRuleSegment> segments,
+                                                    final ReadwriteSplittingRuleConfiguration currentRuleConfig, final boolean isCreating) {
         Collection<String> existedWriteDataSourceNames = new HashSet<>();
         Collection<String> existedReadDataSourceNames = new HashSet<>();
         if (null != currentRuleConfig) {
@@ -67,8 +192,8 @@ public final class ReadwriteSplittingRuleStatementChecker {
         for (final ReadwriteSplittingRuleSegment each : segments) {
             if (!Strings.isNullOrEmpty(each.getWriteDataSource())) {
                 String writeDataSource = each.getWriteDataSource();
-                ShardingSpherePreconditions.checkState(writeDataSourceNames.add(writeDataSource), () -> new InvalidRuleConfigurationException("readwrite splitting", each.getName(),
-                        String.format("Can not config duplicate write resource `%s` in database `%s`", writeDataSource, databaseName)));
+                ShardingSpherePreconditions.checkState(writeDataSourceNames.add(writeDataSource), () -> new InvalidRuleConfigurationException("Readwrite splitting", each.getName(),
+                        String.format("Can not config duplicate write storage unit `%s` in database `%s`", writeDataSource, databaseName)));
             }
         }
     }
@@ -78,10 +203,16 @@ public final class ReadwriteSplittingRuleStatementChecker {
         for (ReadwriteSplittingRuleSegment each : segments) {
             if (null != each.getReadDataSources()) {
                 for (String readDataSource : each.getReadDataSources()) {
-                    ShardingSpherePreconditions.checkState(readDataSourceNames.add(readDataSource), () -> new InvalidRuleConfigurationException("readwrite splitting", each.getName(),
-                            String.format("Can not config duplicate read resource `%s` in database `%s`", readDataSource, databaseName)));
+                    ShardingSpherePreconditions.checkState(readDataSourceNames.add(readDataSource), () -> new InvalidRuleConfigurationException("Readwrite splitting", each.getName(),
+                            String.format("Can not config duplicate read storage unit `%s` in database `%s`", readDataSource, databaseName)));
                 }
             }
         }
     }
+    
+    private static void checkLoadBalancers(final Collection<ReadwriteSplittingRuleSegment> segments) {
+        Collection<String> notExistedLoadBalancers = segments.stream().map(ReadwriteSplittingRuleSegment::getLoadBalancer).filter(Objects::nonNull).distinct()
+                .filter(each -> !ReadQueryLoadBalanceAlgorithmFactory.contains(each)).collect(Collectors.toSet());
+        ShardingSpherePreconditions.checkState(notExistedLoadBalancers.isEmpty(), () -> new InvalidAlgorithmConfigurationException("Load balancers", notExistedLoadBalancers));
+    }
 }
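
The consolidated checker above centralizes validations that previously lived in the create and alter updaters: duplicate rule names (within the statement, against existing rules, and against storage unit names), existence of referenced storage units or auto-aware resources, and known load balancer types. Illustrative statements that checkCreation would now reject, again with assumed 5.3.0-era DistSQL syntax and made-up names:

    -- MissingRequiredResourcesException: ds_9 is not a registered storage unit
    CREATE READWRITE_SPLITTING RULE group_0 (
        WRITE_STORAGE_UNIT=ds_9,
        READ_STORAGE_UNITS(ds_1),
        TYPE(NAME="random")
    );

    -- InvalidAlgorithmConfigurationException: no_such_balancer is not a known
    -- read query load balance algorithm type
    CREATE READWRITE_SPLITTING RULE group_1 (
        WRITE_STORAGE_UNIT=ds_0,
        READ_STORAGE_UNITS(ds_1),
        TYPE(NAME="no_such_balancer")
    );
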
diff --git a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/AlterReadwriteSplittingRuleStatementUpdater.java b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/AlterReadwriteSplittingRuleStatementUpdater.java
index ceafd2be270..ffe64b08ae5 100644
--- a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/AlterReadwriteSplittingRuleStatementUpdater.java
+++ b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/AlterReadwriteSplittingRuleStatementUpdater.java
@@ -18,33 +18,16 @@
 package org.apache.shardingsphere.readwritesplitting.distsql.handler.update;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
 import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
-import org.apache.shardingsphere.infra.distsql.constant.ExportableConstants;
-import org.apache.shardingsphere.infra.distsql.exception.resource.MissingRequiredResourcesException;
-import org.apache.shardingsphere.infra.distsql.exception.rule.InvalidAlgorithmConfigurationException;
-import org.apache.shardingsphere.infra.distsql.exception.rule.MissingRequiredRuleException;
 import org.apache.shardingsphere.infra.distsql.update.RuleDefinitionAlterUpdater;
 import org.apache.shardingsphere.infra.metadata.database.ShardingSphereDatabase;
-import org.apache.shardingsphere.infra.rule.identifier.type.exportable.ExportableRule;
-import org.apache.shardingsphere.infra.rule.identifier.type.exportable.RuleExportEngine;
-import org.apache.shardingsphere.infra.util.exception.ShardingSpherePreconditions;
 import org.apache.shardingsphere.readwritesplitting.api.ReadwriteSplittingRuleConfiguration;
 import org.apache.shardingsphere.readwritesplitting.api.rule.ReadwriteSplittingDataSourceRuleConfiguration;
 import org.apache.shardingsphere.readwritesplitting.distsql.handler.checker.ReadwriteSplittingRuleStatementChecker;
 import org.apache.shardingsphere.readwritesplitting.distsql.handler.converter.ReadwriteSplittingRuleStatementConverter;
-import org.apache.shardingsphere.readwritesplitting.distsql.parser.segment.ReadwriteSplittingRuleSegment;
 import org.apache.shardingsphere.readwritesplitting.distsql.parser.statement.AlterReadwriteSplittingRuleStatement;
-import org.apache.shardingsphere.readwritesplitting.factory.ReadQueryLoadBalanceAlgorithmFactory;
 
-import java.util.Collection;
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Objects;
 import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
 
 /**
  * Alter readwrite-splitting rule statement updater.
@@ -53,65 +36,7 @@ public final class AlterReadwriteSplittingRuleStatementUpdater implements RuleDe
     
     @Override
     public void checkSQLStatement(final ShardingSphereDatabase database, final AlterReadwriteSplittingRuleStatement sqlStatement, final ReadwriteSplittingRuleConfiguration currentRuleConfig) {
-        String databaseName = database.getName();
-        checkCurrentRuleConfiguration(databaseName, currentRuleConfig);
-        checkToBeAlteredRules(databaseName, sqlStatement, currentRuleConfig);
-        checkToBeAlteredResources(databaseName, sqlStatement, database);
-        // TODO move all check methods to checker
-        ReadwriteSplittingRuleStatementChecker.checkDuplicateResourceNames(databaseName, sqlStatement.getRules(), currentRuleConfig, false);
-        checkToBeAlteredLoadBalancer(sqlStatement);
-    }
-    
-    private void checkCurrentRuleConfiguration(final String databaseName, final ReadwriteSplittingRuleConfiguration currentRuleConfig) throws MissingRequiredRuleException {
-        ShardingSpherePreconditions.checkNotNull(currentRuleConfig, () -> new MissingRequiredRuleException("Readwrite splitting", databaseName));
-    }
-    
-    private void checkToBeAlteredRules(final String databaseName, final AlterReadwriteSplittingRuleStatement sqlStatement,
-                                       final ReadwriteSplittingRuleConfiguration currentRuleConfig) throws MissingRequiredRuleException {
-        Collection<String> currentRuleNames = currentRuleConfig.getDataSources().stream().map(ReadwriteSplittingDataSourceRuleConfiguration::getName).collect(Collectors.toSet());
-        Collection<String> notExistedRuleNames = getToBeAlteredRuleNames(sqlStatement).stream().filter(each -> !currentRuleNames.contains(each)).collect(Collectors.toList());
-        ShardingSpherePreconditions.checkState(notExistedRuleNames.isEmpty(), () -> new MissingRequiredRuleException("Readwrite splitting", databaseName, notExistedRuleNames));
-    }
-    
-    private Collection<String> getToBeAlteredRuleNames(final AlterReadwriteSplittingRuleStatement sqlStatement) {
-        return sqlStatement.getRules().stream().map(ReadwriteSplittingRuleSegment::getName).collect(Collectors.toSet());
-    }
-    
-    private void checkToBeAlteredLoadBalancer(final AlterReadwriteSplittingRuleStatement sqlStatement) throws InvalidAlgorithmConfigurationException {
-        Collection<String> invalidLoadBalancers = sqlStatement.getRules().stream().map(ReadwriteSplittingRuleSegment::getLoadBalancer).filter(Objects::nonNull).distinct()
-                .filter(each -> !ReadQueryLoadBalanceAlgorithmFactory.contains(each)).collect(Collectors.toList());
-        ShardingSpherePreconditions.checkState(invalidLoadBalancers.isEmpty(), () -> new InvalidAlgorithmConfigurationException("Load balancers", invalidLoadBalancers));
-    }
-    
-    private void checkToBeAlteredResources(final String databaseName, final AlterReadwriteSplittingRuleStatement sqlStatement, final ShardingSphereDatabase database) {
-        Collection<String> requireResources = new LinkedHashSet<>();
-        Collection<String> requireDiscoverableResources = new LinkedHashSet<>();
-        sqlStatement.getRules().forEach(each -> {
-            if (Strings.isNullOrEmpty(each.getAutoAwareResource())) {
-                requireResources.add(each.getWriteDataSource());
-                requireResources.addAll(each.getReadDataSources());
-            } else {
-                requireDiscoverableResources.add(each.getAutoAwareResource());
-            }
-        });
-        Collection<String> notExistResources = database.getResourceMetaData().getNotExistedResources(requireResources);
-        ShardingSpherePreconditions.checkState(notExistResources.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistResources));
-        Collection<String> logicResources = getLogicResources(database);
-        Set<String> notExistLogicResources = requireDiscoverableResources.stream().filter(each -> !logicResources.contains(each)).collect(Collectors.toSet());
-        ShardingSpherePreconditions.checkState(notExistLogicResources.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistLogicResources));
-    }
-    
-    @SuppressWarnings("unchecked")
-    private Collection<String> getLogicResources(final ShardingSphereDatabase database) {
-        Collection<String> result = new LinkedHashSet<>();
-        Optional<ExportableRule> exportableRule = database.getRuleMetaData().findRules(ExportableRule.class).stream()
-                .filter(each -> new RuleExportEngine(each).containExportableKey(Collections.singletonList(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES))).findAny();
-        exportableRule.ifPresent(optional -> {
-            Map<String, Object> exportData = new RuleExportEngine(optional).export(Collections.singletonList(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES));
-            Set<String> logicResources = ((Map<String, String>) exportData.getOrDefault(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES, Collections.emptyMap())).keySet();
-            result.addAll(logicResources);
-        });
-        return result;
+        ReadwriteSplittingRuleStatementChecker.checkAlteration(database, sqlStatement.getRules(), currentRuleConfig);
     }
     
     @Override
diff --git a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/CreateReadwriteSplittingRuleStatementUpdater.java b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/CreateReadwriteSplittingRuleStatementUpdater.java
index e06218a62cf..5e4c8d6483c 100644
--- a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/CreateReadwriteSplittingRuleStatementUpdater.java
+++ b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/CreateReadwriteSplittingRuleStatementUpdater.java
@@ -17,34 +17,12 @@
 
 package org.apache.shardingsphere.readwritesplitting.distsql.handler.update;
 
-import com.google.common.base.Strings;
-import org.apache.shardingsphere.infra.distsql.constant.ExportableConstants;
-import org.apache.shardingsphere.infra.distsql.exception.resource.MissingRequiredResourcesException;
-import org.apache.shardingsphere.infra.distsql.exception.rule.DuplicateRuleException;
-import org.apache.shardingsphere.infra.distsql.exception.rule.InvalidAlgorithmConfigurationException;
-import org.apache.shardingsphere.infra.distsql.exception.rule.InvalidRuleConfigurationException;
 import org.apache.shardingsphere.infra.distsql.update.RuleDefinitionCreateUpdater;
 import org.apache.shardingsphere.infra.metadata.database.ShardingSphereDatabase;
-import org.apache.shardingsphere.infra.metadata.database.resource.ShardingSphereResourceMetaData;
-import org.apache.shardingsphere.infra.rule.identifier.type.exportable.ExportableRule;
-import org.apache.shardingsphere.infra.rule.identifier.type.exportable.RuleExportEngine;
-import org.apache.shardingsphere.infra.util.exception.ShardingSpherePreconditions;
 import org.apache.shardingsphere.readwritesplitting.api.ReadwriteSplittingRuleConfiguration;
-import org.apache.shardingsphere.readwritesplitting.api.rule.ReadwriteSplittingDataSourceRuleConfiguration;
 import org.apache.shardingsphere.readwritesplitting.distsql.handler.checker.ReadwriteSplittingRuleStatementChecker;
 import org.apache.shardingsphere.readwritesplitting.distsql.handler.converter.ReadwriteSplittingRuleStatementConverter;
-import org.apache.shardingsphere.readwritesplitting.distsql.parser.segment.ReadwriteSplittingRuleSegment;
 import org.apache.shardingsphere.readwritesplitting.distsql.parser.statement.CreateReadwriteSplittingRuleStatement;
-import org.apache.shardingsphere.readwritesplitting.factory.ReadQueryLoadBalanceAlgorithmFactory;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.stream.Collectors;
 
 /**
  * Create readwrite-splitting rule statement updater.
@@ -53,70 +31,7 @@ public final class CreateReadwriteSplittingRuleStatementUpdater implements RuleD
     
     @Override
     public void checkSQLStatement(final ShardingSphereDatabase database, final CreateReadwriteSplittingRuleStatement sqlStatement, final ReadwriteSplittingRuleConfiguration currentRuleConfig) {
-        String databaseName = database.getName();
-        checkDuplicateRuleNames(databaseName, sqlStatement, currentRuleConfig, database.getResourceMetaData());
-        checkToBeCreatedResources(databaseName, sqlStatement, database);
-        // TODO move all check methods to checker
-        ReadwriteSplittingRuleStatementChecker.checkDuplicateResourceNames(databaseName, sqlStatement.getRules(), currentRuleConfig, true);
-        checkToBeCreatedLoadBalancers(sqlStatement);
-    }
-    
-    private void checkDuplicateRuleNames(final String databaseName, final CreateReadwriteSplittingRuleStatement sqlStatement,
-                                         final ReadwriteSplittingRuleConfiguration currentRuleConfig, final ShardingSphereResourceMetaData resourceMetaData) {
-        Collection<String> currentRuleNames = new LinkedList<>();
-        if (null != resourceMetaData && null != resourceMetaData.getDataSources()) {
-            currentRuleNames.addAll(resourceMetaData.getDataSources().keySet());
-        }
-        Collection<String> duplicateRuleNames = sqlStatement.getRules().stream().map(ReadwriteSplittingRuleSegment::getName).filter(currentRuleNames::contains).collect(Collectors.toList());
-        if (!duplicateRuleNames.isEmpty()) {
-            throw new InvalidRuleConfigurationException("readwrite splitting", duplicateRuleNames, Collections.singleton(String.format("%s already exists in resource", duplicateRuleNames)));
-        }
-        if (null != currentRuleConfig) {
-            currentRuleNames.addAll(currentRuleConfig.getDataSources().stream().map(ReadwriteSplittingDataSourceRuleConfiguration::getName).collect(Collectors.toList()));
-        }
-        duplicateRuleNames = sqlStatement.getRules().stream().map(ReadwriteSplittingRuleSegment::getName).filter(currentRuleNames::contains).collect(Collectors.toList());
-        if (!duplicateRuleNames.isEmpty()) {
-            throw new DuplicateRuleException("readwrite splitting", databaseName, duplicateRuleNames);
-        }
-    }
-    
-    private void checkToBeCreatedResources(final String databaseName, final CreateReadwriteSplittingRuleStatement sqlStatement, final ShardingSphereDatabase database) {
-        Collection<String> requireResources = new LinkedHashSet<>();
-        Collection<String> requireDiscoverableResources = new LinkedHashSet<>();
-        sqlStatement.getRules().forEach(each -> {
-            if (Strings.isNullOrEmpty(each.getAutoAwareResource())) {
-                requireResources.add(each.getWriteDataSource());
-                requireResources.addAll(each.getReadDataSources());
-            } else {
-                requireDiscoverableResources.add(each.getAutoAwareResource());
-            }
-        });
-        Collection<String> notExistResources = database.getResourceMetaData().getNotExistedResources(requireResources);
-        ShardingSpherePreconditions.checkState(notExistResources.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistResources));
-        Collection<String> logicResources = getLogicResources(database);
-        Collection<String> notExistLogicResources = requireDiscoverableResources.stream().filter(each -> !logicResources.contains(each)).collect(Collectors.toSet());
-        ShardingSpherePreconditions.checkState(notExistLogicResources.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistLogicResources));
-    }
-    
-    @SuppressWarnings("unchecked")
-    private Collection<String> getLogicResources(final ShardingSphereDatabase database) {
-        Collection<String> result = new LinkedHashSet<>();
-        Optional<ExportableRule> exportableRule = database.getRuleMetaData().findRules(ExportableRule.class).stream()
-                .filter(each -> new RuleExportEngine(each).containExportableKey(Collections.singletonList(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES))).findAny();
-        exportableRule.ifPresent(optional -> {
-            Map<String, Object> exportData = new RuleExportEngine(optional).export(Collections.singletonList(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES));
-            Collection<String> logicResources = ((Map<String, String>) exportData.getOrDefault(ExportableConstants.EXPORT_DB_DISCOVERY_PRIMARY_DATA_SOURCES, Collections.emptyMap())).keySet();
-            result.addAll(logicResources);
-        });
-        return result;
-    }
-    
-    private void checkToBeCreatedLoadBalancers(final CreateReadwriteSplittingRuleStatement sqlStatement) throws InvalidAlgorithmConfigurationException {
-        Collection<String> notExistedLoadBalancers = sqlStatement.getRules().stream().map(ReadwriteSplittingRuleSegment::getLoadBalancer).filter(Objects::nonNull).distinct()
-                .filter(each -> !ReadQueryLoadBalanceAlgorithmFactory.contains(each)).collect(Collectors.toList());
-        if (!notExistedLoadBalancers.isEmpty()) {
-            throw new InvalidAlgorithmConfigurationException("Load balancers", notExistedLoadBalancers);
-        }
+        ReadwriteSplittingRuleStatementChecker.checkCreation(database, sqlStatement.getRules(), currentRuleConfig);
     }
     
     @Override
diff --git a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/DropReadwriteSplittingRuleStatementUpdater.java b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/DropReadwriteSplittingRuleStatementUpdater.java
index eba9bacf644..643be82232d 100644
--- a/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/DropReadwriteSplittingRuleStatementUpdater.java
+++ b/features/readwrite-splitting/distsql/handler/src/main/java/org/apache/shardingsphere/readwritesplitting/distsql/handler/update/DropReadwriteSplittingRuleStatementUpdater.java
@@ -28,6 +28,7 @@ import org.apache.shardingsphere.infra.rule.identifier.type.DataSourceContainedR
 import org.apache.shardingsphere.infra.util.exception.ShardingSpherePreconditions;
 import org.apache.shardingsphere.readwritesplitting.api.ReadwriteSplittingRuleConfiguration;
 import org.apache.shardingsphere.readwritesplitting.api.rule.ReadwriteSplittingDataSourceRuleConfiguration;
+import org.apache.shardingsphere.readwritesplitting.distsql.handler.checker.ReadwriteSplittingRuleStatementChecker;
 import org.apache.shardingsphere.readwritesplitting.distsql.parser.statement.DropReadwriteSplittingRuleStatement;
 import org.apache.shardingsphere.readwritesplitting.rule.ReadwriteSplittingRule;
 
@@ -49,15 +50,11 @@ public final class DropReadwriteSplittingRuleStatementUpdater implements RuleDef
             return;
         }
         String databaseName = database.getName();
-        checkCurrentRuleConfiguration(databaseName, currentRuleConfig);
+        ReadwriteSplittingRuleStatementChecker.checkRuleConfigurationExist(database, currentRuleConfig);
         checkToBeDroppedRuleNames(databaseName, sqlStatement, currentRuleConfig);
         checkToBeDroppedInUsed(database, sqlStatement);
     }
     
-    private void checkCurrentRuleConfiguration(final String databaseName, final ReadwriteSplittingRuleConfiguration currentRuleConfig) throws MissingRequiredRuleException {
-        ShardingSpherePreconditions.checkNotNull(currentRuleConfig, () -> new MissingRequiredRuleException("Readwrite splitting", databaseName));
-    }
-    
     private void checkToBeDroppedRuleNames(final String databaseName, final DropReadwriteSplittingRuleStatement sqlStatement,
                                            final ReadwriteSplittingRuleConfiguration currentRuleConfig) throws MissingRequiredRuleException {
         if (sqlStatement.isIfExists()) {
@@ -65,17 +62,13 @@ public final class DropReadwriteSplittingRuleStatementUpdater implements RuleDef
         }
         Collection<String> currentRuleNames = currentRuleConfig.getDataSources().stream().map(ReadwriteSplittingDataSourceRuleConfiguration::getName).collect(Collectors.toList());
         Collection<String> notExistedRuleNames = sqlStatement.getRuleNames().stream().filter(each -> !currentRuleNames.contains(each)).collect(Collectors.toList());
-        if (!notExistedRuleNames.isEmpty()) {
-            throw new MissingRequiredRuleException("Readwrite splitting", databaseName, sqlStatement.getRuleNames());
-        }
+        ShardingSpherePreconditions.checkState(notExistedRuleNames.isEmpty(), () -> new MissingRequiredRuleException("Readwrite splitting", databaseName, sqlStatement.getRuleNames()));
     }
     
     private void checkToBeDroppedInUsed(final ShardingSphereDatabase database, final DropReadwriteSplittingRuleStatement sqlStatement) throws RuleInUsedException {
         Collection<String> resourceBeUsed = getInUsedResources(database);
         Collection<String> ruleInUsed = sqlStatement.getRuleNames().stream().filter(resourceBeUsed::contains).collect(Collectors.toSet());
-        if (!ruleInUsed.isEmpty()) {
-            throw new RuleInUsedException("Readwrite splitting", database.getName(), ruleInUsed);
-        }
+        ShardingSpherePreconditions.checkState(ruleInUsed.isEmpty(), () -> new RuleInUsedException("Readwrite splitting", database.getName(), ruleInUsed));
     }
     
     private Collection<String> getInUsedResources(final ShardingSphereDatabase database) {
diff --git a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/AlterStorageUnitBackendHandler.java b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/AlterStorageUnitBackendHandler.java
index b6816e21759..468e47ea435 100644
--- a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/AlterStorageUnitBackendHandler.java
+++ b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/AlterStorageUnitBackendHandler.java
@@ -46,6 +46,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Objects;
 import java.util.stream.Collectors;
 
 /**
@@ -72,43 +73,43 @@ public final class AlterStorageUnitBackendHandler extends DatabaseRequiredBacken
         try {
             ProxyContext.getInstance().getContextManager().updateResources(databaseName, dataSourcePropsMap);
         } catch (final SQLException | ShardingSphereServerException ex) {
-            log.error("Alter resource failed", ex);
+            log.error("Alter storage unit failed", ex);
             throw new InvalidResourcesException(Collections.singleton(ex.getMessage()));
         }
         return new UpdateResponseHeader(sqlStatement);
     }
     
     private void checkSQLStatement(final String databaseName, final AlterStorageUnitStatement sqlStatement) {
-        Collection<String> toBeAlteredResourceNames = getToBeAlteredResourceNames(sqlStatement);
-        checkToBeAlteredDuplicateResourceNames(toBeAlteredResourceNames);
-        checkResourceNameExisted(databaseName, toBeAlteredResourceNames);
+        Collection<String> toBeAlteredStorageUnitNames = getToBeAlteredStorageUnitNames(sqlStatement);
+        checkToBeAlteredDuplicateStorageUnitNames(toBeAlteredStorageUnitNames);
+        checkStorageUnitNameExisted(databaseName, toBeAlteredStorageUnitNames);
         checkDatabase(databaseName, sqlStatement);
     }
     
     private void checkDatabase(final String databaseName, final AlterStorageUnitStatement sqlStatement) {
-        Map<String, DataSource> resources = ProxyContext.getInstance().getDatabase(databaseName).getResourceMetaData().getDataSources();
+        Map<String, DataSource> storageUnits = ProxyContext.getInstance().getDatabase(databaseName).getResourceMetaData().getDataSources();
         Collection<String> invalid = sqlStatement.getDataSources().stream().collect(Collectors.toMap(DataSourceSegment::getName, each -> each)).entrySet().stream()
-                .filter(each -> !isIdenticalDatabase(each.getValue(), resources.get(each.getKey()))).map(Entry::getKey).collect(Collectors.toSet());
+                .filter(each -> !isIdenticalDatabase(each.getValue(), storageUnits.get(each.getKey()))).map(Entry::getKey).collect(Collectors.toSet());
         ShardingSpherePreconditions.checkState(invalid.isEmpty(), () -> new InvalidResourcesException(Collections.singleton(String.format("Cannot alter the database of %s", invalid))));
     }
     
-    private Collection<String> getToBeAlteredResourceNames(final AlterStorageUnitStatement sqlStatement) {
+    private Collection<String> getToBeAlteredStorageUnitNames(final AlterStorageUnitStatement sqlStatement) {
         return sqlStatement.getDataSources().stream().map(DataSourceSegment::getName).collect(Collectors.toList());
     }
     
-    private void checkToBeAlteredDuplicateResourceNames(final Collection<String> resourceNames) {
-        Collection<String> duplicateResourceNames = getDuplicateResourceNames(resourceNames);
-        ShardingSpherePreconditions.checkState(duplicateResourceNames.isEmpty(), () -> new DuplicateResourceException(duplicateResourceNames));
+    private void checkToBeAlteredDuplicateStorageUnitNames(final Collection<String> storageUnitNames) {
+        Collection<String> duplicateStorageUnitNames = getDuplicateStorageUnitNames(storageUnitNames);
+        ShardingSpherePreconditions.checkState(duplicateStorageUnitNames.isEmpty(), () -> new DuplicateResourceException(duplicateStorageUnitNames));
     }
     
-    private Collection<String> getDuplicateResourceNames(final Collection<String> resourceNames) {
-        return resourceNames.stream().filter(each -> resourceNames.stream().filter(each::equals).count() > 1).collect(Collectors.toList());
+    private Collection<String> getDuplicateStorageUnitNames(final Collection<String> storageUnitNames) {
+        return storageUnitNames.stream().filter(each -> storageUnitNames.stream().filter(each::equals).count() > 1).collect(Collectors.toList());
     }
     
-    private void checkResourceNameExisted(final String databaseName, final Collection<String> resourceNames) {
-        Map<String, DataSource> resources = ProxyContext.getInstance().getDatabase(databaseName).getResourceMetaData().getDataSources();
-        Collection<String> notExistedResourceNames = resourceNames.stream().filter(each -> !resources.containsKey(each)).collect(Collectors.toList());
-        ShardingSpherePreconditions.checkState(notExistedResourceNames.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistedResourceNames));
+    private void checkStorageUnitNameExisted(final String databaseName, final Collection<String> storageUnitNames) {
+        Map<String, DataSource> storageUnits = ProxyContext.getInstance().getDatabase(databaseName).getResourceMetaData().getDataSources();
+        Collection<String> notExistedStorageUnitNames = storageUnitNames.stream().filter(each -> !storageUnits.containsKey(each)).collect(Collectors.toList());
+        ShardingSpherePreconditions.checkState(notExistedStorageUnitNames.isEmpty(), () -> new MissingRequiredResourcesException(databaseName, notExistedStorageUnitNames));
     }
     
     private boolean isIdenticalDatabase(final DataSourceSegment segment, final DataSource dataSource) {
@@ -128,6 +129,7 @@ public final class AlterStorageUnitBackendHandler extends DatabaseRequiredBacken
         }
         String url = String.valueOf(DataSourcePropertiesCreator.create(dataSource).getConnectionPropertySynonyms().getStandardProperties().get("url"));
         JdbcUrl dataSourceJdbcUrl = new StandardJdbcUrlParser().parse(url);
-        return hostName.equals(dataSourceJdbcUrl.getHostname()) && port.equals(String.valueOf(dataSourceJdbcUrl.getPort())) && database.equals(dataSourceJdbcUrl.getDatabase());
+        return Objects.equals(hostName, dataSourceJdbcUrl.getHostname()) && Objects.equals(port, String.valueOf(dataSourceJdbcUrl.getPort()))
+                && Objects.equals(database, dataSourceJdbcUrl.getDatabase());
     }
 }
diff --git a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandler.java b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandler.java
index f2279b4dc3b..13be0fc6d7d 100644
--- a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandler.java
+++ b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandler.java
@@ -32,6 +32,9 @@ import org.apache.shardingsphere.proxy.backend.handler.DatabaseRequiredBackendHa
 import org.apache.shardingsphere.proxy.backend.response.header.ResponseHeader;
 import org.apache.shardingsphere.proxy.backend.response.header.update.UpdateResponseHeader;
 import org.apache.shardingsphere.proxy.backend.session.ConnectionSession;
+import org.apache.shardingsphere.readwritesplitting.api.ReadwriteSplittingRuleConfiguration;
+import org.apache.shardingsphere.readwritesplitting.api.rule.ReadwriteSplittingDataSourceRuleConfiguration;
+import org.apache.shardingsphere.readwritesplitting.rule.ReadwriteSplittingRule;
 import org.apache.shardingsphere.sharding.distsql.handler.converter.ResourceSegmentsConverter;
 
 import java.sql.SQLException;
@@ -40,6 +43,8 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
 
 /**
  * Register storage unit backend handler.
@@ -65,7 +70,7 @@ public final class RegisterStorageUnitBackendHandler extends DatabaseRequiredBac
         try {
             ProxyContext.getInstance().getContextManager().addResources(databaseName, dataSourcePropsMap);
         } catch (final SQLException | ShardingSphereServerException ex) {
-            log.error("Add resource failed", ex);
+            log.error("Register storage unit failed", ex);
             throw new InvalidResourcesException(Collections.singleton(ex.getMessage()));
         }
         return new UpdateResponseHeader(sqlStatement);
@@ -81,5 +86,18 @@ public final class RegisterStorageUnitBackendHandler extends DatabaseRequiredBac
             dataSourceNames.add(each.getName());
         }
         ShardingSpherePreconditions.checkState(duplicateDataSourceNames.isEmpty(), () -> new DuplicateResourceException(duplicateDataSourceNames));
+        checkDuplicateDataSourceNameWithReadwriteSplittingRule(databaseName, dataSourceNames);
+    }
+    
+    private void checkDuplicateDataSourceNameWithReadwriteSplittingRule(final String databaseName, final Collection<String> requiredDataSourceNames) {
+        Optional<ReadwriteSplittingRule> readwriteSplittingRule = ProxyContext.getInstance().getDatabase(databaseName).getRuleMetaData().findSingleRule(ReadwriteSplittingRule.class);
+        if (!readwriteSplittingRule.isPresent()) {
+            return;
+        }
+        ReadwriteSplittingRuleConfiguration config = (ReadwriteSplittingRuleConfiguration) readwriteSplittingRule.get().getConfiguration();
+        Collection<String> existRuleNames = config.getDataSources().stream().map(ReadwriteSplittingDataSourceRuleConfiguration::getName).collect(Collectors.toSet());
+        Collection<String> duplicateDataSourceNames = requiredDataSourceNames.stream().filter(each -> existRuleNames.contains(each)).collect(Collectors.toSet());
+        ShardingSpherePreconditions.checkState(duplicateDataSourceNames.isEmpty(),
+                () -> new InvalidResourcesException(Collections.singleton(String.format("%s already exists in readwrite splitting", duplicateDataSourceNames))));
     }
 }
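
With the new check above, a storage unit can no longer be registered under the name of an existing readwrite-splitting rule. A hypothetical session against a database that already has a rule named read_write would now fail roughly as follows; the error text comes from the format string in the diff, while the statement syntax is again assumed from 5.3.0-era DistSQL:

    REGISTER STORAGE UNIT read_write (
        URL="jdbc:mysql://127.0.0.1:3306/db2",
        USER="root",
        PASSWORD="root"
    );
    -- rejected with InvalidResourcesException:
    -- [read_write] already exists in readwrite splitting
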
diff --git a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/UnregisterStorageUnitBackendHandler.java b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/UnregisterStorageUnitBackendHandler.java
index 7b6658b7a6d..063a2c456d3 100644
--- a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/UnregisterStorageUnitBackendHandler.java
+++ b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/UnregisterStorageUnitBackendHandler.java
@@ -62,7 +62,7 @@ public final class UnregisterStorageUnitBackendHandler extends DatabaseRequiredB
         try {
             ProxyContext.getInstance().getContextManager().dropResources(databaseName, toBeDroppedResourceNames);
         } catch (final SQLException | ShardingSphereServerException ex) {
-            log.error("Drop resource failed", ex);
+            log.error("Unregister storage unit failed", ex);
             throw new InvalidResourcesException(Collections.singleton(ex.getMessage()));
         }
         return new UpdateResponseHeader(sqlStatement);
diff --git a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/SetDefaultSingleTableStorageUnitStatementUpdater.java b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/SetDefaultSingleTableStorageUnitStatementUpdater.java
index 8a64e7ed5a1..c0ff141a77f 100644
--- a/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/SetDefaultSingleTableStorageUnitStatementUpdater.java
+++ b/proxy/backend/src/main/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/rule/SetDefaultSingleTableStorageUnitStatementUpdater.java
@@ -35,13 +35,13 @@ public final class SetDefaultSingleTableStorageUnitStatementUpdater implements R
     
     @Override
     public void checkSQLStatement(final ShardingSphereDatabase database, final SetDefaultSingleTableStorageUnitStatement sqlStatement, final SingleTableRuleConfiguration currentRuleConfig) {
-        checkResourceExist(database, sqlStatement);
+        checkStorageUnitExist(database, sqlStatement);
     }
     
-    private void checkResourceExist(final ShardingSphereDatabase database, final SetDefaultSingleTableStorageUnitStatement sqlStatement) {
+    private void checkStorageUnitExist(final ShardingSphereDatabase database, final SetDefaultSingleTableStorageUnitStatement sqlStatement) {
         if (StringUtils.isNotBlank(sqlStatement.getDefaultStorageUnit())) {
-            Collection<String> resourceNames = database.getResourceMetaData().getDataSources().keySet();
-            ShardingSpherePreconditions.checkState(resourceNames.contains(sqlStatement.getDefaultStorageUnit()),
+            Collection<String> storageUnitNames = database.getResourceMetaData().getDataSources().keySet();
+            ShardingSpherePreconditions.checkState(storageUnitNames.contains(sqlStatement.getDefaultStorageUnit()),
                     () -> new MissingRequiredResourcesException(database.getName(), Collections.singleton(sqlStatement.getDefaultStorageUnit())));
         }
     }
diff --git a/proxy/backend/src/test/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandlerTest.java b/proxy/backend/src/test/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandlerTest.java
index cc6c26e1252..24208a7ccd6 100644
--- a/proxy/backend/src/test/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandlerTest.java
+++ b/proxy/backend/src/test/java/org/apache/shardingsphere/proxy/backend/handler/distsql/rdl/resource/RegisterStorageUnitBackendHandlerTest.java
@@ -24,8 +24,10 @@ import org.apache.shardingsphere.distsql.parser.statement.rdl.create.RegisterSto
 import org.apache.shardingsphere.infra.database.type.dialect.MySQLDatabaseType;
 import org.apache.shardingsphere.infra.datasource.props.DataSourcePropertiesValidator;
 import org.apache.shardingsphere.infra.distsql.exception.resource.DuplicateResourceException;
+import org.apache.shardingsphere.infra.distsql.exception.resource.InvalidResourcesException;
 import org.apache.shardingsphere.infra.metadata.database.ShardingSphereDatabase;
 import org.apache.shardingsphere.infra.metadata.database.resource.ShardingSphereResourceMetaData;
+import org.apache.shardingsphere.infra.metadata.database.rule.ShardingSphereRuleMetaData;
 import org.apache.shardingsphere.mode.manager.ContextManager;
 import org.apache.shardingsphere.mode.metadata.MetaDataContexts;
 import org.apache.shardingsphere.proxy.backend.context.ProxyContext;
@@ -33,6 +35,9 @@ import org.apache.shardingsphere.proxy.backend.response.header.ResponseHeader;
 import org.apache.shardingsphere.proxy.backend.response.header.update.UpdateResponseHeader;
 import org.apache.shardingsphere.proxy.backend.session.ConnectionSession;
 import org.apache.shardingsphere.proxy.backend.util.ProxyContextRestorer;
+import org.apache.shardingsphere.readwritesplitting.api.ReadwriteSplittingRuleConfiguration;
+import org.apache.shardingsphere.readwritesplitting.api.rule.ReadwriteSplittingDataSourceRuleConfiguration;
+import org.apache.shardingsphere.readwritesplitting.rule.ReadwriteSplittingRule;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -44,6 +49,7 @@ import java.lang.reflect.Field;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.LinkedList;
+import java.util.Optional;
 import java.util.Properties;
 
 import static org.hamcrest.CoreMatchers.instanceOf;
@@ -73,6 +79,12 @@ public final class RegisterStorageUnitBackendHandlerTest extends ProxyContextRes
     @Mock
     private ShardingSphereResourceMetaData resourceMetaData;
     
+    @Mock
+    private ShardingSphereRuleMetaData ruleMetaData;
+    
+    @Mock
+    private ReadwriteSplittingRule readwriteSplittingRule;
+    
     private RegisterStorageUnitBackendHandler registerStorageUnitBackendHandler;
     
     @Before
@@ -80,6 +92,8 @@ public final class RegisterStorageUnitBackendHandlerTest extends ProxyContextRes
         when(metaDataContexts.getMetaData().getDatabase("test_db")).thenReturn(database);
         when(metaDataContexts.getMetaData().containsDatabase("test_db")).thenReturn(true);
         when(connectionSession.getProtocolType()).thenReturn(new MySQLDatabaseType());
+        when(database.getRuleMetaData()).thenReturn(ruleMetaData);
+        when(ruleMetaData.findSingleRule(ReadwriteSplittingRule.class)).thenReturn(Optional.of(readwriteSplittingRule));
         registerStorageUnitBackendHandler = new RegisterStorageUnitBackendHandler(registerStorageUnitStatement, connectionSession);
         Field field = registerStorageUnitBackendHandler.getClass().getDeclaredField("validator");
         field.setAccessible(true);
@@ -94,12 +108,13 @@ public final class RegisterStorageUnitBackendHandlerTest extends ProxyContextRes
         when(metaDataContexts.getMetaData().getDatabases()).thenReturn(Collections.singletonMap("test_db", database));
         when(database.getResourceMetaData()).thenReturn(resourceMetaData);
         when(resourceMetaData.getDataSources()).thenReturn(Collections.emptyMap());
+        when(readwriteSplittingRule.getConfiguration()).thenReturn(createReadwriteSplittingRuleConfiguration("read_write"));
         ResponseHeader responseHeader = registerStorageUnitBackendHandler.execute("test_db", createRegisterStorageUnitStatement());
         assertThat(responseHeader, instanceOf(UpdateResponseHeader.class));
     }
     
     @Test(expected = DuplicateResourceException.class)
-    public void assertExecuteWithDuplicateStorageUnitNames() {
+    public void assertExecuteWithDuplicateStorageUnitNamesInStatement() {
         ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
         when(contextManager.getMetaDataContexts()).thenReturn(metaDataContexts);
         ProxyContext.init(contextManager);
@@ -109,6 +124,34 @@ public final class RegisterStorageUnitBackendHandlerTest extends ProxyContextRes
         registerStorageUnitBackendHandler.execute("test_db", createRegisterStorageUnitStatementWithDuplicateStorageUnitNames());
     }
     
+    @Test(expected = DuplicateResourceException.class)
+    public void assertExecuteWithDuplicateStorageUnitNamesWithResourceMetaData() {
+        ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
+        when(contextManager.getMetaDataContexts()).thenReturn(metaDataContexts);
+        ProxyContext.init(contextManager);
+        when(metaDataContexts.getMetaData().getDatabases()).thenReturn(Collections.singletonMap("test_db", database));
+        when(database.getResourceMetaData()).thenReturn(resourceMetaData);
+        when(resourceMetaData.getDataSources()).thenReturn(Collections.singletonMap("ds_0", null));
+        registerStorageUnitBackendHandler.execute("test_db", createRegisterStorageUnitStatement());
+    }
+    
+    @Test(expected = InvalidResourcesException.class)
+    public void assertExecuteWithDuplicateStorageUnitNamesWithReadwriteSplittingRule() {
+        ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
+        when(contextManager.getMetaDataContexts()).thenReturn(metaDataContexts);
+        ProxyContext.init(contextManager);
+        when(metaDataContexts.getMetaData().getDatabases()).thenReturn(Collections.singletonMap("test_db", database));
+        when(database.getResourceMetaData()).thenReturn(resourceMetaData);
+        when(resourceMetaData.getDataSources()).thenReturn(Collections.emptyMap());
+        when(readwriteSplittingRule.getConfiguration()).thenReturn(createReadwriteSplittingRuleConfiguration("ds_0"));
+        registerStorageUnitBackendHandler.execute("test_db", createRegisterStorageUnitStatement());
+    }
+    
+    private ReadwriteSplittingRuleConfiguration createReadwriteSplittingRuleConfiguration(final String ruleName) {
+        ReadwriteSplittingDataSourceRuleConfiguration configuration = new ReadwriteSplittingDataSourceRuleConfiguration(ruleName, null, null, null);
+        return new ReadwriteSplittingRuleConfiguration(Collections.singleton(configuration), Collections.emptyMap());
+    }
+    
     private RegisterStorageUnitStatement createRegisterStorageUnitStatement() {
         return new RegisterStorageUnitStatement(Collections.singleton(new URLBasedDataSourceSegment("ds_0", "jdbc:mysql://127.0.0.1:3306/test0", "root", "", new Properties())));
     }