Posted to notifications@shardingsphere.apache.org by GitBox <gi...@apache.org> on 2019/08/09 09:32:04 UTC

[GitHub] [incubator-shardingsphere] soca2013 opened a new issue #2843: I modified this code to support MySQL and SQL Server; does it have any problems?

soca2013 opened a new issue #2843: I modified this code to support MySQL and SQL Server; does it have any problems?
URL: https://github.com/apache/incubator-shardingsphere/issues/2843
 
 
    In the AbstractDataSourceAdapter class I deleted this check:

    ```java
    Preconditions.checkState(null == result || result.equals(databaseType), String.format("Database type inconsistent with '%s' and '%s'", result, databaseType));
    ```

    After deleting it, I found that the ShardingTransactionManager implementations can no longer use a single DatabaseType.
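    For context, that check is what forces every configured DataSource to share one DatabaseType. A simplified sketch of how the single type is derived (illustrative only, not the exact ShardingSphere source; method and variable names are mine):

    ```java
    // Sketch: derive one DatabaseType from all data sources and reject mixed types.
    private DatabaseType createDatabaseType(final Collection<DataSource> dataSources) throws SQLException {
        DatabaseType result = null;
        for (DataSource each : dataSources) {
            // Probe each data source once and close the connection afterwards.
            try (Connection connection = each.getConnection()) {
                DatabaseType databaseType = DatabaseType.valueFrom(connection.getMetaData().getDatabaseProductName());
                Preconditions.checkState(null == result || result.equals(databaseType),
                        String.format("Database type inconsistent with '%s' and '%s'", result, databaseType));
                result = databaseType;
            }
        }
        return result;
    }
    ```

    Once the check is removed there is no single DatabaseType left for the adapter to expose, so anything that expects one per data-source group (such as the transaction managers) would have to be given a per-data-source type instead.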
   
   
   
    I modified ShardingDataSourceMetaData as follows:
   
    ```java
       public ShardingDataSourceMetaData(final Map<String, DataSource> dataSourceURLs, final ShardingRule shardingRule, final DatabaseType databaseType) throws SQLException {
           dataSourceMetaDataMap = getDataSourceMetaDataMap(dataSourceURLs, shardingRule, databaseType);
       }
   
       private Map<String, DataSourceMetaData> getDataSourceMetaDataMap(final Map<String, DataSource> dataSourceURLs, final ShardingRule shardingRule, final DatabaseType databaseType) throws SQLException {
           Map<String, DataSourceMetaData> dataSourceMetaData = getDataSourceMetaDataMapForSharding(dataSourceURLs, databaseType);
           return shardingRule.getMasterSlaveRules().isEmpty() ? dataSourceMetaData : getDataSourceMetaDataMapForMasterSlave(shardingRule, dataSourceMetaData);
       }
   
       private Map<String, DataSourceMetaData> getDataSourceMetaDataMapForSharding(final Map<String, DataSource> dataSourceURLs, final DatabaseType databaseType) throws SQLException {
           Map<String, DataSourceMetaData> result = new LinkedHashMap<>(dataSourceURLs.size(), 1);
           for (Entry<String, DataSource> entry : dataSourceURLs.entrySet()) {
               DatabaseType databaseTypeValue = DatabaseType.valueFrom(entry.getValue().getConnection().getMetaData().getDatabaseProductName());
               result.put(entry.getKey(), DataSourceMetaDataFactory.newInstance(databaseTypeValue, entry.getValue().getConnection().getMetaData().getURL()));
           }
           return result;
       }
   
       private Map<String, DataSourceMetaData> getDataSourceMetaDataMapForMasterSlave(final ShardingRule shardingRule, final Map<String, DataSourceMetaData> dataSourceMetaDataMap) {
           Map<String, DataSourceMetaData> result = new LinkedHashMap<>(dataSourceMetaDataMap);
           for (Entry<String, DataSourceMetaData> entry : dataSourceMetaDataMap.entrySet()) {
               Optional<MasterSlaveRule> masterSlaveRule = shardingRule.findMasterSlaveRule(entry.getKey());
               if (masterSlaveRule.isPresent() && masterSlaveRule.get().getMasterDataSourceName().equals(entry.getKey())) {
                   reviseMasterSlaveMetaData(result, entry.getValue(), masterSlaveRule.get());
               }
           }
           return result;
       }
   
       private void reviseMasterSlaveMetaData(final Map<String, DataSourceMetaData> dataSourceMetaDataMap, final DataSourceMetaData masterSlaveDataSourceMetaData, final MasterSlaveRule masterSlaveRule) {
           dataSourceMetaDataMap.put(masterSlaveRule.getName(), masterSlaveDataSourceMetaData);
           dataSourceMetaDataMap.remove(masterSlaveRule.getMasterDataSourceName());
           for (String each : masterSlaveRule.getSlaveDataSourceNames()) {
               dataSourceMetaDataMap.remove(each);
           }
       }
   
       /**
        * Get all instance data source names.
        *
        * @return instance data source names
        */
       public Collection<String> getAllInstanceDataSourceNames() {
           Collection<String> result = new LinkedList<>();
           for (Entry<String, DataSourceMetaData> entry : dataSourceMetaDataMap.entrySet()) {
               if (!isExisted(entry.getKey(), result)) {
                   result.add(entry.getKey());
               }
           }
           return result;
       }
    ```
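    One thing worth double-checking in getDataSourceMetaDataMapForSharding: the databaseType parameter is now unused, and each call to entry.getValue().getConnection() may open a new physical connection that is never closed. A variant that keeps your method and factory names but probes the metadata through a single try-with-resources connection might look like this (a sketch, not tested against the ShardingSphere code base):

    ```java
    private Map<String, DataSourceMetaData> getDataSourceMetaDataMapForSharding(final Map<String, DataSource> dataSources) throws SQLException {
        Map<String, DataSourceMetaData> result = new LinkedHashMap<>(dataSources.size(), 1);
        for (Entry<String, DataSource> entry : dataSources.entrySet()) {
            // Open one connection per data source and make sure it is closed / returned to the pool.
            try (Connection connection = entry.getValue().getConnection()) {
                DatabaseMetaData metaData = connection.getMetaData();
                DatabaseType databaseTypeValue = DatabaseType.valueFrom(metaData.getDatabaseProductName());
                result.put(entry.getKey(), DataSourceMetaDataFactory.newInstance(databaseTypeValue, metaData.getURL()));
            }
        }
        return result;
    }
    ```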
   
    I also modified SQLExecuteCallback:
    ```java
       private T execute0(final StatementExecuteUnit statementExecuteUnit, final boolean isTrunkThread, final Map<String, Object> shardingExecuteDataMap) throws SQLException {
           ExecutorExceptionHandler.setExceptionThrown(isExceptionThrown);
           DatabaseType databaseTypeValue = DatabaseType.valueFrom(statementExecuteUnit.getStatement().getConnection().getMetaData().getDatabaseProductName());
           DataSourceMetaData dataSourceMetaData = DataSourceMetaDataFactory.newInstance(databaseTypeValue, statementExecuteUnit.getStatement().getConnection().getMetaData().getURL());
           SQLExecutionHook sqlExecutionHook = new SPISQLExecutionHook();
           try {
               sqlExecutionHook.start(statementExecuteUnit.getRouteUnit(), dataSourceMetaData, isTrunkThread, shardingExecuteDataMap);
               T result = executeSQL(statementExecuteUnit.getRouteUnit(), statementExecuteUnit.getStatement(), statementExecuteUnit.getConnectionMode());
               sqlExecutionHook.finishSuccess();
               return result;
           } catch (final SQLException ex) {
               sqlExecutionHook.finishFailure(ex);
               ExecutorExceptionHandler.handleException(ex);
               return null;
           }
       }
    ```
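    Also note that execute0 now calls getConnection().getMetaData() for every statement execution, which can add a driver round trip per SQL. If that turns out to be measurable, one option is to cache the DataSourceMetaData per JDBC URL. The field and helper below are hypothetical, only to illustrate the idea (requires java.util.concurrent.ConcurrentHashMap):

    ```java
    // Hypothetical per-URL cache for DataSourceMetaData; not part of ShardingSphere.
    private static final ConcurrentMap<String, DataSourceMetaData> DATA_SOURCE_META_DATA_CACHE = new ConcurrentHashMap<>();

    private DataSourceMetaData loadDataSourceMetaData(final Statement statement) throws SQLException {
        DatabaseMetaData metaData = statement.getConnection().getMetaData();
        String url = metaData.getURL();
        DataSourceMetaData result = DATA_SOURCE_META_DATA_CACHE.get(url);
        if (null == result) {
            // Build the metadata once per distinct JDBC URL and reuse it afterwards.
            DatabaseType databaseType = DatabaseType.valueFrom(metaData.getDatabaseProductName());
            result = DataSourceMetaDataFactory.newInstance(databaseType, url);
            DATA_SOURCE_META_DATA_CACHE.putIfAbsent(url, result);
        }
        return result;
    }
    ```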
   
   
   
   
   
   
   
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services