Posted to commits@doris.apache.org by mo...@apache.org on 2022/06/17 04:22:14 UTC

[incubator-doris] branch dev-1.0.1 updated (42ce8361ec -> f9f67880b5)

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a change to branch dev-1.0.1
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


    from 42ce8361ec [Feature] compaction quickly for small data import (#9804)
     new 2d3a3b1a69 [feature] Support hive on s3 (#10128)
     new a9e7a1e2a5 [fix](optimizer) Fix the default join reorder algorithm (#10174)
     new 3343384172 [fix](vectorized) intersect operator takes too long time to execute (#10183)
     new f9f67880b5 [Bug][Vectorized] Fix DCHECK failed in VExchangeNode close twice (#10184)

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/vec/common/hash_table/hash_table.h          |  7 +++-
 be/src/vec/exec/vexchange_node.cpp                 |  5 ++-
 be/src/vec/exec/vset_operation_node.h              | 20 ++++++++--
 be/src/vec/exec/vunion_node.cpp                    |  1 +
 .../java/org/apache/doris/analysis/SelectStmt.java | 32 ++++++++--------
 .../doris/catalog/HiveMetaStoreClientHelper.java   | 43 +++++++++++++++++++---
 .../java/org/apache/doris/catalog/HiveTable.java   | 19 ++++++----
 .../org/apache/doris/planner/HiveScanNode.java     | 21 +++++++++--
 .../org/apache/doris/planner/QueryPlanTest.java    |  1 -
 9 files changed, 111 insertions(+), 38 deletions(-)




[incubator-doris] 04/04: [Bug][Vectorized] Fix DCHECK failed in VExchangeNode close twice (#10184)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch dev-1.0.1
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git

commit f9f67880b53b62863c6b151bf5668464e219687f
Author: HappenLee <ha...@hotmail.com>
AuthorDate: Thu Jun 16 23:56:49 2022 +0800

    [Bug][Vectorized] Fix DCHECK failed in VExchangeNode close twice (#10184)
    
    Co-authored-by: lihaopeng <li...@baidu.com>
---
 be/src/vec/exec/vexchange_node.cpp | 5 ++++-
 be/src/vec/exec/vunion_node.cpp    | 1 +
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/be/src/vec/exec/vexchange_node.cpp b/be/src/vec/exec/vexchange_node.cpp
index 83c9e31b56..014716b542 100644
--- a/be/src/vec/exec/vexchange_node.cpp
+++ b/be/src/vec/exec/vexchange_node.cpp
@@ -95,10 +95,13 @@ Status VExchangeNode::get_next(RuntimeState* state, Block* block, bool* eos) {
 }
 
 Status VExchangeNode::close(RuntimeState* state) {
+    if (is_closed()) {
+        return Status::OK();
+    }
+
     if (_stream_recvr != nullptr) {
         _stream_recvr->close();
     }
-
     if (_is_merging) _vsort_exec_exprs.close(state);
 
     return ExecNode::close(state);
diff --git a/be/src/vec/exec/vunion_node.cpp b/be/src/vec/exec/vunion_node.cpp
index c05b3ef6a8..5f1bfefa17 100644
--- a/be/src/vec/exec/vunion_node.cpp
+++ b/be/src/vec/exec/vunion_node.cpp
@@ -213,6 +213,7 @@ Status VUnionNode::get_next(RuntimeState* state, Block* block, bool* eos) {
     RETURN_IF_CANCELLED(state);
     // RETURN_IF_ERROR(QueryMaintenance(state));
 
+    // TODO: Rethink this logic, which causes the exec node to be closed twice.
     if (_to_close_child_idx != -1) {
         // The previous child needs to be closed if passthrough was enabled for it. In the non
         // passthrough case, the child was already closed in the previous call to get_next().
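
The change above makes VExchangeNode::close() idempotent, so a second close() issued from VUnionNode's pass-through path returns early instead of releasing the receiver again and tripping the DCHECK. A minimal, self-contained sketch of that guard pattern (the class names below are illustrative stand-ins, not the actual Doris classes):

```
#include <iostream>

// Simplified stand-in for doris::ExecNode, for illustration only.
class ExecNodeSketch {
public:
    virtual ~ExecNodeSketch() = default;

    // Mirrors the is_closed() check used by the patch.
    bool is_closed() const { return _closed; }

    virtual void close() { _closed = true; }

private:
    bool _closed = false;
};

// Simplified stand-in for the vectorized exchange node.
class ExchangeNodeSketch : public ExecNodeSketch {
public:
    void close() override {
        // Guard against a second close() from a parent node, e.g. a union
        // node closing a pass-through child that was already closed.
        if (is_closed()) {
            return;
        }
        std::cout << "releasing stream receiver and sort exprs exactly once\n";
        ExecNodeSketch::close();
    }
};

int main() {
    ExchangeNodeSketch node;
    node.close();
    node.close(); // second call is now a no-op instead of failing a DCHECK
    return 0;
}
```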




[incubator-doris] 03/04: [fix](vectorized) intersect operator takes too long time to execute (#10183)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch dev-1.0.1
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git

commit 334338417218b7eab8d8fa24d30318b44c13aca0
Author: starocean999 <40...@users.noreply.github.com>
AuthorDate: Fri Jun 17 08:43:53 2022 +0800

    [fix](vectorized) intersect operator takes too long time to execute (#10183)
    
    * fix intersect operator taking too long to execute
    
    * modify code based on review comments
---
 be/src/vec/common/hash_table/hash_table.h |  7 +++++--
 be/src/vec/exec/vset_operation_node.h     | 20 ++++++++++++++++----
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/be/src/vec/common/hash_table/hash_table.h b/be/src/vec/common/hash_table/hash_table.h
index c55d806699..920c819694 100644
--- a/be/src/vec/common/hash_table/hash_table.h
+++ b/be/src/vec/common/hash_table/hash_table.h
@@ -887,9 +887,12 @@ public:
     }
 
     void delete_zero_key(Key key) {
-        if (Cell::is_zero(key, *this))
-             this->clear_get_has_zero();
+        if (this->get_has_zero() && Cell::is_zero(key, *this)) {
+            --m_size;
+            this->clear_get_has_zero();
+        }
     }
+
     void clear() {
         destroy_elements();
         this->clear_get_has_zero();
diff --git a/be/src/vec/exec/vset_operation_node.h b/be/src/vec/exec/vset_operation_node.h
index 1f8519c955..ba4eba3013 100644
--- a/be/src/vec/exec/vset_operation_node.h
+++ b/be/src/vec/exec/vset_operation_node.h
@@ -112,23 +112,35 @@ void VSetOperationNode::refresh_hash_table() {
 
                     arg.init_once();
                     auto& iter = arg.iter;
-                    for (; iter != arg.hash_table.end(); ++iter) {
+                    auto iter_end = arg.hash_table.end();
+                    while (iter != iter_end) {
                         auto& mapped = iter->get_second();
                         auto it = mapped.begin();
 
                         if constexpr (keep_matched) { //intersected
                             if (it->visited) {
                                 it->visited = false;
-                                if (is_need_shrink)
+                                if (is_need_shrink) {
                                     tmp_hash_table.hash_table.insert(iter->get_value());
+                                }
+                                ++iter;
                             } else {
-                                arg.hash_table.delete_zero_key(iter->get_first());
-                                iter->set_zero();
+                                if (!is_need_shrink) {
+                                    arg.hash_table.delete_zero_key(iter->get_first());
+                                    // ++iter checks whether the current key is zero; if it is, the iterator is moved back to the container's head.
+                                    // So we advance the iterator before calling set_zero() so that it moves on to the next valid key correctly.
+                                    auto iter_prev = iter;
+                                    ++iter;
+                                    iter_prev->set_zero();
+                                } else {
+                                    ++iter;
+                                }
                             }
                         } else { //except
                             if (!it->visited && is_need_shrink) {
                                 tmp_hash_table.hash_table.insert(iter->get_value());
                             }
+                            ++iter;
                         }
                     }
 
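
The new comment in the loop above points at a subtle pitfall: invalidating the entry the iterator currently points at (via delete_zero_key()/set_zero()) can send the iterator back to the container's head, so the patch advances the iterator before touching the entry. The same class of bug exists with standard containers; below is a small, self-contained illustration using std::unordered_map (not the Doris hash table) of removing entries safely while iterating:

```
#include <iostream>
#include <string>
#include <unordered_map>

int main() {
    std::unordered_map<int, std::string> table{{1, "keep"}, {2, "drop"}, {3, "keep"}};

    for (auto it = table.begin(); it != table.end();) {
        if (it->second == "drop") {
            // erase() returns the iterator following the removed element, so
            // the loop keeps walking forward instead of reusing an iterator
            // that the container may have invalidated or repositioned.
            it = table.erase(it);
        } else {
            ++it;
        }
    }

    for (const auto& [key, value] : table) {
        std::cout << key << " -> " << value << "\n";
    }
    return 0;
}
```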




[incubator-doris] 01/04: [feature] Support hive on s3 (#10128)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch dev-1.0.1
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git

commit 2d3a3b1a69a1d9db8edefc68b9165e403a28ae2e
Author: morningman <mo...@163.com>
AuthorDate: Fri Jun 17 11:48:58 2022 +0800

    [feature] Support hive on s3 (#10128)
    
    Support querying Hive tables stored on S3. The AK/SK, region and S3 endpoint are passed to the Hive table as properties when creating the external table.
    
    Example CREATE TABLE statement:
    ```
    CREATE TABLE `region_s3` (
    `r_regionkey` integer NOT NULL,
    `r_name` char(25) NOT NULL,
    `r_comment` varchar(152) )
    engine=hive
    properties
    ("database"="default",
    "table"="region_s3",
    "hive.metastore.uris"="thrift://127.0.0.1:9083",
    "AWS_ACCESS_KEY"="YOUR_ACCESS_KEY",
    "AWS_SECRET_KEY"="YOUR_SECRET_KEY",
    "AWS_ENDPOINT"="s3.us-east-1.amazonaws.com",
    "AWS_REGION"="us-east-1");
    ```
---
 .../doris/catalog/HiveMetaStoreClientHelper.java   | 43 +++++++++++++++++++---
 .../java/org/apache/doris/catalog/HiveTable.java   | 19 ++++++----
 .../org/apache/doris/planner/HiveScanNode.java     | 21 +++++++++--
 3 files changed, 68 insertions(+), 15 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
index f192df298b..b49a7e5d7d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
@@ -29,6 +29,7 @@ import org.apache.doris.analysis.IntLiteral;
 import org.apache.doris.analysis.LiteralExpr;
 import org.apache.doris.analysis.NullLiteral;
 import org.apache.doris.analysis.SlotRef;
+import org.apache.doris.analysis.StorageBackend;
 import org.apache.doris.analysis.StringLiteral;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.util.BrokerUtil;
@@ -170,10 +171,11 @@ public class HiveMetaStoreClientHelper {
      * @throws DdlException
      */
     public static String getHiveDataFiles(HiveTable hiveTable, ExprNodeGenericFuncDesc hivePartitionPredicate,
-                                          List<TBrokerFileStatus> fileStatuses, Table remoteHiveTbl) throws DdlException {
+                                          List<TBrokerFileStatus> fileStatuses, Table remoteHiveTbl, StorageBackend.StorageType type) throws DdlException {
         HiveMetaStoreClient client = getClient(hiveTable.getHiveProperties().get(HiveTable.HIVE_METASTORE_URIS));
 
         List<RemoteIterator<LocatedFileStatus>> remoteIterators;
+        Boolean onS3 = type.equals(StorageBackend.StorageType.S3);
         if (remoteHiveTbl.getPartitionKeys().size() > 0) {
             // hive partitioned table, get file iterator from table partition sd info
             List<Partition> hivePartitions = new ArrayList<>();
@@ -186,10 +188,10 @@ public class HiveMetaStoreClientHelper {
             } finally {
                 client.close();
             }
-            remoteIterators = getRemoteIterator(hivePartitions, hiveTable.getHiveProperties());
+            remoteIterators = getRemoteIterator(hivePartitions, hiveTable.getHiveProperties(), onS3);
         } else {
             // hive non-partitioned table, get file iterator from table sd info
-            remoteIterators = getRemoteIterator(remoteHiveTbl, hiveTable.getHiveProperties());
+            remoteIterators = getRemoteIterator(remoteHiveTbl, hiveTable.getHiveProperties(), onS3);
         }
 
         String hdfsUrl = "";
@@ -204,6 +206,12 @@ public class HiveMetaStoreClientHelper {
                     // path = "/path/to/partition/file_name"
                     // eg: /home/work/dev/hive/apache-hive-2.3.7-bin/data/warehouse/dae.db/customer/state=CA/city=SanJose/000000_0
                     String path = fileStatus.getPath().toUri().getPath();
+                    if (onS3) {
+                        // The backend needs the full s3 path (with s3://bucket at the beginning) to read the data on s3.
+                        // path = "s3://bucket/path/to/partition/file_name"
+                        // eg: s3://hive-s3-test/region/region.tbl
+                        path = fileStatus.getPath().toString();
+                    }
                     brokerFileStatus.setPath(path);
                     fileStatuses.add(brokerFileStatus);
                     if (StringUtils.isEmpty(hdfsUrl)) {
@@ -222,7 +230,24 @@ public class HiveMetaStoreClientHelper {
         return hdfsUrl;
     }
 
-    private static List<RemoteIterator<LocatedFileStatus>> getRemoteIterator(List<Partition> partitions, Map<String, String> properties) throws DdlException {
+    private static void setS3Configuration(Configuration configuration, Map<String, String> properties) {
+        if (properties.containsKey(HiveTable.S3_AK)) {
+            configuration.set("fs.s3a.access.key", properties.get(HiveTable.S3_AK));
+        }
+        if (properties.containsKey(HiveTable.S3_SK)) {
+            configuration.set("fs.s3a.secret.key", properties.get(HiveTable.S3_SK));
+        }
+        if (properties.containsKey(HiveTable.S3_ENDPOINT)) {
+            configuration.set("fs.s3a.endpoint", properties.get(HiveTable.S3_ENDPOINT));
+        }
+        configuration.set("fs.s3.impl.disable.cache", "true");
+        configuration.set("fs.s3.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
+        configuration.set("fs.s3a.attempts.maximum", "2");
+    }
+
+    private static List<RemoteIterator<LocatedFileStatus>> getRemoteIterator(
+            List<Partition> partitions, Map<String, String> properties, boolean onS3)
+            throws DdlException {
         List<RemoteIterator<LocatedFileStatus>> iterators = new ArrayList<>();
         Configuration configuration = new Configuration(false);
         for (Map.Entry<String, String> entry : properties.entrySet()) {
@@ -230,6 +255,9 @@ public class HiveMetaStoreClientHelper {
                 configuration.set(entry.getKey(), entry.getValue());
             }
         }
+        if (onS3) {
+            setS3Configuration(configuration, properties);
+        }
         for (Partition p : partitions) {
             String location = p.getSd().getLocation();
             org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(location);
@@ -244,7 +272,9 @@ public class HiveMetaStoreClientHelper {
         return iterators;
     }
 
-    private static List<RemoteIterator<LocatedFileStatus>> getRemoteIterator(Table table, Map<String, String> properties) throws DdlException {
+    private static List<RemoteIterator<LocatedFileStatus>> getRemoteIterator(
+            Table table, Map<String, String> properties, boolean onS3)
+            throws DdlException {
         List<RemoteIterator<LocatedFileStatus>> iterators = new ArrayList<>();
         Configuration configuration = new Configuration(false);
         boolean isSecurityEnabled = false;
@@ -257,6 +287,9 @@ public class HiveMetaStoreClientHelper {
                 isSecurityEnabled = true;
             }
         }
+        if (onS3) {
+            setS3Configuration(configuration, properties);
+        }
         String location = table.getSd().getLocation();
         org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(location);
         try {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java
index 19be317f12..812185d15e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java
@@ -42,15 +42,19 @@ public class HiveTable extends Table {
     private static final String PROPERTY_MISSING_MSG = "Hive %s is null. Please add properties('%s'='xxx') when create table";
     private static final String PROPERTY_ERROR_MSG = "Hive table properties('%s'='%s') is illegal or not supported. Please check it";
 
-    private static final String HIVE_DB = "database";
-    private static final String HIVE_TABLE = "table";
-    public static final String HIVE_METASTORE_URIS = "hive.metastore.uris";
-    public static final String HIVE_HDFS_PREFIX = "dfs";
-
     private String hiveDb;
     private String hiveTable;
     private Map<String, String> hiveProperties = Maps.newHashMap();
 
+    public static final String HIVE_DB = "database";
+    public static final String HIVE_TABLE = "table";
+    public static final String HIVE_METASTORE_URIS = "hive.metastore.uris";
+    public static final String HIVE_HDFS_PREFIX = "dfs";
+    public static final String S3_PROPERTIES_PREFIX = "AWS";
+    public static final String S3_AK = "AWS_ACCESS_KEY";
+    public static final String S3_SK = "AWS_SECRET_KEY";
+    public static final String S3_ENDPOINT = "AWS_ENDPOINT";
+
     public HiveTable() {
         super(TableType.HIVE);
     }
@@ -142,8 +146,9 @@ public class HiveTable extends Table {
             Iterator<Map.Entry<String, String>> iter = copiedProps.entrySet().iterator();
             while(iter.hasNext()) {
                 Map.Entry<String, String> entry = iter.next();
-                if (entry.getKey().startsWith(HIVE_HDFS_PREFIX)) {
-                    hiveProperties.put(entry.getKey(), entry.getValue());
+                String key = entry.getKey();
+                if (key.startsWith(HIVE_HDFS_PREFIX) || key.startsWith(S3_PROPERTIES_PREFIX)) {
+                    hiveProperties.put(key, entry.getValue());
                     iter.remove();
                 }
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
index 711e63e695..cfd76de134 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/HiveScanNode.java
@@ -17,6 +17,7 @@
 
 package org.apache.doris.planner;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.doris.analysis.Analyzer;
 import org.apache.doris.analysis.BrokerDesc;
 import org.apache.doris.analysis.Expr;
@@ -68,6 +69,7 @@ public class HiveScanNode extends BrokerScanNode {
     private String fileFormat;
     private String path;
     private List<String> partitionKeys = new ArrayList<>();
+    private StorageBackend.StorageType storageType;
     /* hive table properties */
 
     public String getHostUri() {
@@ -123,13 +125,26 @@ public class HiveScanNode extends BrokerScanNode {
                         getFileFormat(),
                         getPartitionKeys(),
                         getParsedColumnExprList()));
-        brokerDesc = new BrokerDesc("HiveTableDesc", StorageBackend.StorageType.HDFS, hiveTable.getHiveProperties());
+        brokerDesc = new BrokerDesc("HiveTableDesc", storageType, hiveTable.getHiveProperties());
         targetTable = hiveTable;
     }
 
-    private void initHiveTblProperties() throws DdlException {
+    private void setStorageType(String location) throws UserException {
+        String[] strings = StringUtils.split(location, "/");
+        String storagePrefix = strings[0].split(":")[0];
+        if (storagePrefix.equalsIgnoreCase("s3")) {
+            this.storageType = StorageBackend.StorageType.S3;
+        } else if (storagePrefix.equalsIgnoreCase("hdfs")) {
+            this.storageType = StorageBackend.StorageType.HDFS;
+        } else {
+            throw new UserException("Not supported storage type: " + storagePrefix);
+        }
+    }
+
+    private void initHiveTblProperties() throws UserException {
         this.remoteHiveTable = HiveMetaStoreClientHelper.getTable(hiveTable);
         this.fileFormat = HiveMetaStoreClientHelper.HiveFileFormat.getFormat(remoteHiveTable.getSd().getInputFormat());
+        this.setStorageType(remoteHiveTable.getSd().getLocation());
 
         Map<String, String> serDeInfoParams = remoteHiveTable.getSd().getSerdeInfo().getParameters();
         this.columnSeparator = Strings.isNullOrEmpty(serDeInfoParams.get("field.delim")) ?
@@ -179,7 +194,7 @@ public class HiveScanNode extends BrokerScanNode {
         }
         List<TBrokerFileStatus> fileStatuses = new ArrayList<>();
         this.hdfsUri = HiveMetaStoreClientHelper.getHiveDataFiles(hiveTable, hivePartitionPredicate,
-            fileStatuses, remoteHiveTable);
+                fileStatuses, remoteHiveTable, storageType);
         fileStatusesList.add(fileStatuses);
         filesAdded += fileStatuses.size();
         for (TBrokerFileStatus fstatus : fileStatuses) {




[incubator-doris] 02/04: [fix](optimizer) Fix the default join reorder algorithm (#10174)

Posted by mo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch dev-1.0.1
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git

commit a9e7a1e2a562c0424d58d772706d7d361a003dc7
Author: Kikyou1997 <33...@users.noreply.github.com>
AuthorDate: Fri Jun 17 10:59:33 2022 +0800

    [fix](optimizer) Fix the default join reorder algorithm (#10174)
    
    The default join reorder algorithm was not working in most cases.
---
 .../java/org/apache/doris/analysis/SelectStmt.java | 32 ++++++++++++----------
 .../org/apache/doris/planner/QueryPlanTest.java    |  1 -
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
index 8852c9fc61..e18f5cb11c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
@@ -733,7 +733,7 @@ public class SelectStmt extends QueryStmt {
 
     protected void reorderTable(Analyzer analyzer) throws AnalysisException {
         List<Pair<TableRef, Long>> candidates = Lists.newArrayList();
-
+        List<TableRef> originOrderBackUp = Lists.newArrayList(fromClause_.getTableRefs());
         // New pair of table ref and row count
         for (TableRef tblRef : fromClause_) {
             if (tblRef.getJoinOp() != JoinOperator.INNER_JOIN || tblRef.hasJoinHints()) {
@@ -771,8 +771,8 @@ public class SelectStmt extends QueryStmt {
 
         // can not get AST only with equal join, MayBe cross join can help
         fromClause_.clear();
-        for (Pair<TableRef, Long> candidate : candidates) {
-            fromClause_.add(candidate.first);
+        for (TableRef tableRef : originOrderBackUp) {
+            fromClause_.add(tableRef);
         }
     }
 
@@ -817,19 +817,21 @@ public class SelectStmt extends QueryStmt {
                     // is being added.
                     Preconditions.checkState(tid == candidateTableRef.getId());
                     List<Expr> candidateEqJoinPredicates = analyzer.getEqJoinConjunctsExcludeAuxPredicates(tid);
-                    List<TupleId> candidateTupleList = Lists.newArrayList();
-                    Expr.getIds(candidateEqJoinPredicates, candidateTupleList, null);
-                    int count = candidateTupleList.size();
-                    for (TupleId tupleId : candidateTupleList) {
-                        if (validTupleId.contains(tupleId) || tid == tupleId) {
-                            count--;
+                    for (Expr candidateEqJoinPredicate : candidateEqJoinPredicates) {
+                        List<TupleId> candidateTupleList = Lists.newArrayList();
+                        Expr.getIds(Lists.newArrayList(candidateEqJoinPredicate), candidateTupleList, null);
+                        int count = candidateTupleList.size();
+                        for (TupleId tupleId : candidateTupleList) {
+                            if (validTupleId.contains(tupleId) || tid.equals(tupleId)) {
+                                count--;
+                            }
+                        }
+                        if (count == 0) {
+                            fromClause_.add(candidateTableRef);
+                            validTupleId.add(tid);
+                            tableRefMap.remove(tid);
+                            break;
                         }
-                    }
-
-                    if (count == 0) {
-                        fromClause_.add(candidateTableRef);
-                        validTupleId.add(tid);
-                        tableRefMap.remove(tid);
                     }
                 }
             }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java
index df97fbc845..e97cc16c45 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryPlanTest.java
@@ -2152,5 +2152,4 @@ public class QueryPlanTest {
         Assert.assertFalse(explainString.contains("non-equal FULL OUTER JOIN is not supported"));
 
     }
-
 }
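
For the SelectStmt change above: the old code pooled the tuple ids of all of a candidate table's equi-join predicates and required every referenced table to be already placed, which rarely held once a table joined several others; the patch instead accepts the candidate as soon as any single predicate references only already-placed tables. A rough, illustrative sketch of that per-predicate check (plain C++ with made-up types, not the Doris planner code):

```
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// One equi-join predicate, reduced to the set of table names it references.
struct Predicate {
    std::set<std::string> referenced_tables;
};

// A candidate table together with its equi-join predicates.
struct Candidate {
    std::string name;
    std::vector<Predicate> eq_join_predicates;
};

// A candidate may be appended to the join order if at least one of its
// predicates references nothing but already-placed tables (or itself).
bool can_place(const Candidate& cand, const std::set<std::string>& placed) {
    for (const Predicate& pred : cand.eq_join_predicates) {
        bool fully_connected = std::all_of(
                pred.referenced_tables.begin(), pred.referenced_tables.end(),
                [&](const std::string& t) { return t == cand.name || placed.count(t) > 0; });
        if (fully_connected) {
            return true; // one fully connected predicate is enough
        }
    }
    return false;
}

int main() {
    std::set<std::string> placed = {"t1"};
    // t2 joins t1 (already placed) and t3 (not placed yet): the t1 = t2
    // predicate alone is enough to place t2, whereas the old pooled check
    // would have rejected it because t3 is not placed.
    Candidate t2{"t2", {Predicate{{"t1", "t2"}}, Predicate{{"t2", "t3"}}}};
    std::cout << std::boolalpha << can_place(t2, placed) << "\n"; // prints: true
    return 0;
}
```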


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@doris.apache.org
For additional commands, e-mail: commits-help@doris.apache.org