Posted to commits@doris.apache.org by mo...@apache.org on 2021/01/21 04:09:26 UTC

[incubator-doris] branch master updated: [LOG] Standardize the use of VLOG in code (#5264)

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 93a4c7e  [LOG] Standardize the use of VLOG in code (#5264)
93a4c7e is described below

commit 93a4c7efc164654d350bd6d7c448ec689125e0a5
Author: Zhengguo Yang <ya...@gmail.com>
AuthorDate: Thu Jan 21 12:09:09 2021 +0800

    [LOG] Standardize the use of VLOG in code (#5264)
    
    At present, the use of VLOG in the code is quite confusing.
    It mixes the VLOG_XX macro format inherited from Impala with the raw VLOG(number) format,
    and the VLOG(number) format has no unified specification, so this PR standardizes the use of VLOG.
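    
    For illustration, a minimal sketch of how call sites map onto the new macros
    (the call sites and the debug_string() helper below are hypothetical; the
    level numbers follow the mapping added in be/src/common/logging.h):
    
        #include "common/logging.h"
    
        VLOG_CRITICAL << "prepare scan node";  // VLOG(1): rare, high-level events
        VLOG_NOTICE << "push ok";              // VLOG(3): notable per-task events
        VLOG_DEBUG << "read next row group";   // VLOG(7): detailed debugging
        VLOG_TRACE << "parsed one path";       // VLOG(10): per-row, trace-level output
    
        // Guard expensive message construction with the matching _IS_ON macro:
        if (VLOG_DEBUG_IS_ON) {
            VLOG_DEBUG << "tablet state: " << tablet->debug_string();  // hypothetical helper
        }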
---
 be/src/agent/pusher.cpp                            |  2 +-
 be/src/agent/task_worker_pool.cpp                  |  2 +-
 be/src/common/logging.h                            | 15 ++++++--
 be/src/exec/broker_scanner.cpp                     |  2 +-
 be/src/exec/csv_scan_node.cpp                      |  8 ++--
 be/src/exec/csv_scanner.cpp                        |  2 +-
 be/src/exec/decompressor.cpp                       |  2 +-
 be/src/exec/es/es_scan_reader.cpp                  |  4 +-
 be/src/exec/es_http_scan_node.cpp                  |  2 +-
 be/src/exec/es_scan_node.cpp                       | 44 +++++++++++-----------
 be/src/exec/exec_node.cpp                          |  2 +-
 be/src/exec/json_scanner.cpp                       |  2 +-
 be/src/exec/mysql_scan_node.cpp                    |  6 +--
 be/src/exec/mysql_scanner.cpp                      |  2 +-
 be/src/exec/odbc_scan_node.cpp                     |  6 +--
 be/src/exec/olap_scan_node.cpp                     | 33 ++++++++--------
 be/src/exec/olap_scan_node.h                       |  2 +-
 be/src/exec/olap_scanner.cpp                       |  2 +-
 be/src/exec/olap_utils.h                           |  2 +-
 be/src/exec/parquet_reader.cpp                     |  4 +-
 be/src/exec/partitioned_aggregation_node.cc        |  2 +-
 be/src/exec/partitioned_hash_table.cc              |  4 +-
 be/src/exec/tablet_sink.cpp                        |  2 +-
 be/src/exprs/json_functions.cpp                    | 10 ++---
 be/src/exprs/timestamp_functions.cpp               |  2 +-
 be/src/gutil/atomicops-internals-x86.cc            |  2 +-
 be/src/gutil/strings/split.cc                      |  6 +--
 be/src/http/action/mini_load.cpp                   |  2 +-
 be/src/http/action/stream_load.cpp                 |  2 +-
 be/src/http/utils.cpp                              |  2 +-
 be/src/http/web_page_handler.cpp                   |  2 +-
 be/src/olap/cumulative_compaction.cpp              |  2 +-
 be/src/olap/cumulative_compaction_policy.cpp       |  4 +-
 be/src/olap/delete_handler.cpp                     |  2 +-
 be/src/olap/delta_writer.cpp                       |  2 +-
 be/src/olap/file_helper.cpp                        | 12 +++---
 be/src/olap/file_helper.h                          |  4 +-
 be/src/olap/file_stream.cpp                        |  4 +-
 be/src/olap/fs/file_block_manager.cpp              | 10 ++---
 be/src/olap/lru_cache.cpp                          |  2 +-
 be/src/olap/olap_cond.cpp                          |  2 +-
 be/src/olap/olap_index.cpp                         |  4 +-
 be/src/olap/olap_snapshot_converter.cpp            |  2 +-
 be/src/olap/out_stream.cpp                         |  6 +--
 be/src/olap/out_stream.h                           |  6 +--
 be/src/olap/push_handler.cpp                       | 24 ++++++------
 be/src/olap/reader.cpp                             |  8 ++--
 be/src/olap/rowset/alpha_rowset.cpp                |  8 ++--
 be/src/olap/rowset/alpha_rowset_reader.cpp         | 10 ++---
 be/src/olap/rowset/column_data.cpp                 |  4 +-
 be/src/olap/rowset/column_data_writer.cpp          |  2 +-
 be/src/olap/rowset/column_reader.cpp               |  2 +-
 be/src/olap/rowset/rowset.h                        |  4 +-
 be/src/olap/rowset/rowset_meta_manager.cpp         |  4 +-
 be/src/olap/rowset/run_length_byte_reader.cpp      |  2 +-
 be/src/olap/rowset/run_length_integer_writer.cpp   |  2 +-
 be/src/olap/rowset/run_length_integer_writer.h     |  2 +-
 be/src/olap/rowset/segment_group.cpp               | 16 ++++----
 be/src/olap/rowset/segment_reader.cpp              | 22 +++++------
 be/src/olap/rowset/segment_writer.cpp              |  4 +-
 be/src/olap/schema_change.cpp                      | 32 ++++++++--------
 be/src/olap/snapshot_manager.cpp                   |  6 +--
 be/src/olap/storage_engine.cpp                     | 12 +++---
 be/src/olap/stream_index_writer.cpp                |  2 +-
 be/src/olap/tablet.cpp                             | 22 +++++------
 be/src/olap/tablet_manager.cpp                     | 32 ++++++++--------
 be/src/olap/tablet_meta_manager.cpp                |  2 +-
 be/src/olap/task/engine_batch_load_task.cpp        |  4 +-
 be/src/olap/task/engine_checksum_task.cpp          |  2 +-
 be/src/olap/task/engine_clone_task.cpp             |  4 +-
 be/src/olap/txn_manager.cpp                        | 10 ++---
 be/src/olap/utils.cpp                              | 12 +++---
 be/src/olap/version_graph.cpp                      | 16 ++++----
 be/src/runtime/etl_job_mgr.cpp                     |  2 +-
 be/src/runtime/export_task_mgr.cpp                 |  2 +-
 be/src/runtime/load_channel_mgr.cpp                |  6 +--
 be/src/runtime/plan_fragment_executor.cpp          | 10 ++---
 be/src/runtime/routine_load/data_consumer.cpp      |  4 +-
 be/src/runtime/routine_load/data_consumer.h        |  2 +-
 .../runtime/routine_load/data_consumer_group.cpp   |  8 ++--
 be/src/runtime/routine_load/data_consumer_pool.cpp |  8 ++--
 .../routine_load/routine_load_task_executor.cpp    |  2 +-
 be/src/runtime/snapshot_loader.cpp                 | 32 ++++++++--------
 be/src/runtime/stream_load/load_stream_mgr.h       |  4 +-
 be/src/runtime/tablets_channel.cpp                 |  2 +-
 be/src/service/backend_options.cpp                 |  2 +-
 be/src/util/cidr.cpp                               |  2 +-
 be/src/util/mutex.cpp                              |  6 +--
 be/src/util/progress_updater.cpp                   |  8 ++--
 be/src/util/progress_updater.h                     |  5 ---
 be/src/util/tdigest.h                              | 32 ++++++++--------
 be/src/util/thread.cpp                             |  4 +-
 be/src/util/threadpool.cpp                         |  4 +-
 be/src/util/thrift_client.cpp                      |  4 +-
 be/src/util/thrift_util.h                          |  2 +-
 be/test/exec/new_olap_scan_node_test.cpp           |  8 ++--
 be/test/exprs/binary_predicate_test.cpp            |  6 +--
 be/test/exprs/in_op_test.cpp                       |  6 +--
 98 files changed, 342 insertions(+), 341 deletions(-)

diff --git a/be/src/agent/pusher.cpp b/be/src/agent/pusher.cpp
index 8e3ea72..5d1d2b0 100644
--- a/be/src/agent/pusher.cpp
+++ b/be/src/agent/pusher.cpp
@@ -134,7 +134,7 @@ AgentStatus Pusher::process(vector<TTabletInfo>* tablet_infos) {
             time_t now = time(NULL);
             if (_push_req.timeout > 0 && _push_req.timeout < now) {
                 // return status to break this callback
-                VLOG(3) << "check time out. time_out:" << _push_req.timeout << ", now:" << now;
+                VLOG_NOTICE << "check time out. time_out:" << _push_req.timeout << ", now:" << now;
                 is_timeout = true;
                 return Status::OK();
             }
diff --git a/be/src/agent/task_worker_pool.cpp b/be/src/agent/task_worker_pool.cpp
index 0d10314..e4848c7 100644
--- a/be/src/agent/task_worker_pool.cpp
+++ b/be/src/agent/task_worker_pool.cpp
@@ -639,7 +639,7 @@ void TaskWorkerPool::_push_worker_thread_callback() {
         }
 
         if (status == DORIS_SUCCESS) {
-            VLOG(3) << "push ok. signature: " << agent_task_req.signature
+            VLOG_NOTICE << "push ok. signature: " << agent_task_req.signature
                     << ", push_type: " << push_req.push_type;
             error_msgs.push_back("push success");
 
diff --git a/be/src/common/logging.h b/be/src/common/logging.h
index e3c3130..62265ce 100644
--- a/be/src/common/logging.h
+++ b/be/src/common/logging.h
@@ -62,13 +62,22 @@
 #define VLOG_FILE VLOG(2)
 #define VLOG_ROW VLOG(10)
 #define VLOG_PROGRESS VLOG(2)
+#define VLOG_TRACE VLOG(10)
+#define VLOG_DEBUG VLOG(7)
+#define VLOG_NOTICE VLOG(3)
+#define VLOG_CRITICAL VLOG(1)
+
 
 #define VLOG_CONNECTION_IS_ON VLOG_IS_ON(1)
-#define VLOG_RPC_IS_ON VLOG_IS_ON(2)
+#define VLOG_RPC_IS_ON VLOG_IS_ON(8)
 #define VLOG_QUERY_IS_ON VLOG_IS_ON(1)
 #define VLOG_FILE_IS_ON VLOG_IS_ON(2)
-#define VLOG_ROW_IS_ON VLOG_IS_ON(3)
-#define VLOG_PROGRESS_IS_ON VLOG_IS_ON(2)
+#define VLOG_ROW_IS_ON VLOG_IS_ON(10)
+#define VLOG_TRACE_IS_ON VLOG_IS_ON(10)
+#define VLOG_DEBUG_IS_ON VLOG_IS_ON(7)
+#define VLOG_NOTICE_IS_ON VLOG_IS_ON(3)
+#define VLOG_CRITICAL_IS_ON VLOG_IS_ON(1)
+
 
 /// Define a wrapper around DCHECK for strongly typed enums that print a useful error
 /// message on failure.
diff --git a/be/src/exec/broker_scanner.cpp b/be/src/exec/broker_scanner.cpp
index 4741163..99a8d8f 100644
--- a/be/src/exec/broker_scanner.cpp
+++ b/be/src/exec/broker_scanner.cpp
@@ -155,7 +155,7 @@ Status BrokerScanner::open_file_reader() {
     case TFileType::FILE_STREAM: {
         _stream_load_pipe = _state->exec_env()->load_stream_mgr()->get(range.load_id);
         if (_stream_load_pipe == nullptr) {
-            VLOG(3) << "unknown stream load id: " << UniqueId(range.load_id);
+            VLOG_NOTICE << "unknown stream load id: " << UniqueId(range.load_id);
             return Status::InternalError("unknown stream load id");
         }
         _cur_file_reader = _stream_load_pipe.get();
diff --git a/be/src/exec/csv_scan_node.cpp b/be/src/exec/csv_scan_node.cpp
index 7b073ed..23d54b9 100644
--- a/be/src/exec/csv_scan_node.cpp
+++ b/be/src/exec/csv_scan_node.cpp
@@ -112,7 +112,7 @@ Status CsvScanNode::init(const TPlanNode& tnode) {
 }
 
 Status CsvScanNode::prepare(RuntimeState* state) {
-    VLOG(1) << "CsvScanNode::Prepare";
+    VLOG_CRITICAL << "CsvScanNode::Prepare";
 
     if (_is_init) {
         return Status::OK();
@@ -206,7 +206,7 @@ Status CsvScanNode::prepare(RuntimeState* state) {
 
 Status CsvScanNode::open(RuntimeState* state) {
     RETURN_IF_ERROR(ExecNode::open(state));
-    VLOG(1) << "CsvScanNode::Open";
+    VLOG_CRITICAL << "CsvScanNode::Open";
 
     if (nullptr == state) {
         return Status::InternalError("input pointer is nullptr.");
@@ -227,7 +227,7 @@ Status CsvScanNode::open(RuntimeState* state) {
 }
 
 Status CsvScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) {
-    VLOG(1) << "CsvScanNode::GetNext";
+    VLOG_CRITICAL << "CsvScanNode::GetNext";
     if (nullptr == state || nullptr == row_batch || nullptr == eos) {
         return Status::InternalError("input is nullptr pointer");
     }
@@ -315,7 +315,7 @@ Status CsvScanNode::close(RuntimeState* state) {
     if (is_closed()) {
         return Status::OK();
     }
-    VLOG(1) << "CsvScanNode::Close";
+    VLOG_CRITICAL << "CsvScanNode::Close";
     RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE));
 
     SCOPED_TIMER(_runtime_profile->total_time_counter());
diff --git a/be/src/exec/csv_scanner.cpp b/be/src/exec/csv_scanner.cpp
index 5841ecb..77a4224 100644
--- a/be/src/exec/csv_scanner.cpp
+++ b/be/src/exec/csv_scanner.cpp
@@ -40,7 +40,7 @@ CsvScanner::~CsvScanner() {
 }
 
 Status CsvScanner::open() {
-    VLOG(1) << "CsvScanner::Connect";
+    VLOG_CRITICAL << "CsvScanner::Connect";
 
     if (_is_open) {
         LOG(INFO) << "this scanner already opened";
diff --git a/be/src/exec/decompressor.cpp b/be/src/exec/decompressor.cpp
index ea6cc0e..56c74cb 100644
--- a/be/src/exec/decompressor.cpp
+++ b/be/src/exec/decompressor.cpp
@@ -111,7 +111,7 @@ Status GzipDecompressor::decompress(uint8_t* input, size_t input_len, size_t* in
         *input_bytes_read = input_len - _z_strm.avail_in;
         *decompressed_len = output_max_len - _z_strm.avail_out;
 
-        VLOG(10) << "gzip dec ret: " << ret << " input_bytes_read: " << *input_bytes_read
+        VLOG_TRACE << "gzip dec ret: " << ret << " input_bytes_read: " << *input_bytes_read
                  << " decompressed_len: " << *decompressed_len;
 
         if (ret == Z_BUF_ERROR) {
diff --git a/be/src/exec/es/es_scan_reader.cpp b/be/src/exec/es/es_scan_reader.cpp
index fe4dbed..7dc3809 100644
--- a/be/src/exec/es/es_scan_reader.cpp
+++ b/be/src/exec/es/es_scan_reader.cpp
@@ -111,7 +111,7 @@ Status ESScanReader::open() {
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
-    VLOG(1) << "open _cached response: " << _cached_response;
+    VLOG_CRITICAL << "open _cached response: " << _cached_response;
     return Status::OK();
 }
 
@@ -153,7 +153,7 @@ Status ESScanReader::get_next(bool* scan_eos, std::unique_ptr<ScrollParser>& scr
     }
 
     scroll_parser.reset(new ScrollParser(_doc_value_mode));
-    VLOG(1) << "get_next request ES, returned response: " << response;
+    VLOG_CRITICAL << "get_next request ES, returned response: " << response;
     Status status = scroll_parser->parse(response, _exactly_once);
     if (!status.ok()) {
         _eos = true;
diff --git a/be/src/exec/es_http_scan_node.cpp b/be/src/exec/es_http_scan_node.cpp
index d00a4aa..3905f4c 100644
--- a/be/src/exec/es_http_scan_node.cpp
+++ b/be/src/exec/es_http_scan_node.cpp
@@ -99,7 +99,7 @@ Status EsHttpScanNode::build_conjuncts_list() {
             _predicates.push_back(predicate);
             _predicate_to_conjunct.push_back(i);
         } else {
-            VLOG(1) << status.get_error_msg();
+            VLOG_CRITICAL << status.get_error_msg();
             status = predicate->get_es_query_status();
             if (!status.ok()) {
                 LOG(WARNING) << status.get_error_msg();
diff --git a/be/src/exec/es_scan_node.cpp b/be/src/exec/es_scan_node.cpp
index 097879c..59dc38b 100644
--- a/be/src/exec/es_scan_node.cpp
+++ b/be/src/exec/es_scan_node.cpp
@@ -60,7 +60,7 @@ EsScanNode::EsScanNode(ObjectPool* pool, const TPlanNode& tnode, const Descripto
 EsScanNode::~EsScanNode() {}
 
 Status EsScanNode::prepare(RuntimeState* state) {
-    VLOG(1) << "EsScanNode::Prepare";
+    VLOG_CRITICAL << "EsScanNode::Prepare";
 
     RETURN_IF_ERROR(ScanNode::prepare(state));
     _tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_id);
@@ -76,7 +76,7 @@ Status EsScanNode::prepare(RuntimeState* state) {
 }
 
 Status EsScanNode::open(RuntimeState* state) {
-    VLOG(1) << "EsScanNode::Open";
+    VLOG_CRITICAL << "EsScanNode::Open";
 
     RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::OPEN));
     RETURN_IF_CANCELLED(state);
@@ -99,7 +99,7 @@ Status EsScanNode::open(RuntimeState* state) {
     std::vector<vector<TExtPredicate>> predicates;
     std::vector<int> predicate_to_conjunct;
     for (int i = 0; i < _conjunct_ctxs.size(); ++i) {
-        VLOG(1) << "conjunct: " << _conjunct_ctxs[i]->root()->debug_string();
+        VLOG_CRITICAL << "conjunct: " << _conjunct_ctxs[i]->root()->debug_string();
         std::vector<TExtPredicate> disjuncts;
         if (get_disjuncts(_conjunct_ctxs[i], _conjunct_ctxs[i]->root(), disjuncts)) {
             predicates.emplace_back(std::move(disjuncts));
@@ -196,7 +196,7 @@ Status EsScanNode::open(RuntimeState* state) {
 }
 
 Status EsScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) {
-    VLOG(1) << "EsScanNode::GetNext";
+    VLOG_CRITICAL << "EsScanNode::GetNext";
 
     RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::GETNEXT));
     RETURN_IF_CANCELLED(state);
@@ -216,7 +216,7 @@ Status EsScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos)
     _offsets[_scan_range_idx] += result.rows.num_rows;
 
     // convert
-    VLOG(1) << "begin to convert: scan_range_idx=" << _scan_range_idx
+    VLOG_CRITICAL << "begin to convert: scan_range_idx=" << _scan_range_idx
             << ", num_rows=" << result.rows.num_rows;
     std::vector<TExtColumnData>& cols = result.rows.cols;
     // indexes of the next non-null value in the row batch, per column.
@@ -237,10 +237,10 @@ Status EsScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos)
         }
     }
 
-    VLOG(1) << "finish one batch: num_rows=" << row_batch->num_rows();
+    VLOG_CRITICAL << "finish one batch: num_rows=" << row_batch->num_rows();
     COUNTER_SET(_rows_returned_counter, _num_rows_returned);
     if (result.__isset.eos && result.eos) {
-        VLOG(1) << "es finish one scan_range: scan_range_idx=" << _scan_range_idx;
+        VLOG_CRITICAL << "es finish one scan_range: scan_range_idx=" << _scan_range_idx;
         ++_scan_range_idx;
     }
     if (_scan_range_idx == _scan_ranges.size()) {
@@ -252,7 +252,7 @@ Status EsScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos)
 
 Status EsScanNode::close(RuntimeState* state) {
     if (is_closed()) return Status::OK();
-    VLOG(1) << "EsScanNode::Close";
+    VLOG_CRITICAL << "EsScanNode::Close";
     RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE));
     SCOPED_TIMER(_runtime_profile->total_time_counter());
     Expr::close(_pushdown_conjunct_ctxs, state);
@@ -275,7 +275,7 @@ Status EsScanNode::close(RuntimeState* state) {
             }
 
             try {
-                VLOG(1) << "es close param=" << apache::thrift::ThriftDebugString(params);
+                VLOG_CRITICAL << "es close param=" << apache::thrift::ThriftDebugString(params);
                 client->close(result, params);
             } catch (apache::thrift::transport::TTransportException& e) {
                 LOG(WARNING) << "es close retrying, because: " << e.what();
@@ -289,7 +289,7 @@ Status EsScanNode::close(RuntimeState* state) {
             return Status::ThriftRpcError(ss.str());
         }
 
-        VLOG(1) << "es close result=" << apache::thrift::ThriftDebugString(result);
+        VLOG_CRITICAL << "es close result=" << apache::thrift::ThriftDebugString(result);
         Status status(result.status);
         if (!status.ok()) {
             LOG(WARNING) << "es close error: : scan_range_idx=" << i
@@ -329,7 +329,7 @@ Status EsScanNode::set_scan_ranges(const std::vector<TScanRangeParams>& scan_ran
 
 Status EsScanNode::open_es(TNetworkAddress& address, TExtOpenResult& result,
                            TExtOpenParams& params) {
-    VLOG(1) << "es open param=" << apache::thrift::ThriftDebugString(params);
+    VLOG_CRITICAL << "es open param=" << apache::thrift::ThriftDebugString(params);
 #ifndef BE_TEST
     try {
         ExtDataSourceServiceClientCache* client_cache = _env->extdatasource_client_cache();
@@ -349,7 +349,7 @@ Status EsScanNode::open_es(TNetworkAddress& address, TExtOpenResult& result,
             RETURN_IF_ERROR(client.reopen());
             client->open(result, params);
         }
-        VLOG(1) << "es open result=" << apache::thrift::ThriftDebugString(result);
+        VLOG_CRITICAL << "es open result=" << apache::thrift::ThriftDebugString(result);
         return Status(result.status);
     } catch (apache::thrift::TException& e) {
         std::stringstream ss;
@@ -393,7 +393,7 @@ bool EsScanNode::get_disjuncts(ExprContext* context, Expr* conjunct,
                                std::vector<TExtPredicate>& disjuncts) {
     if (TExprNodeType::BINARY_PRED == conjunct->node_type()) {
         if (conjunct->children().size() != 2) {
-            VLOG(1) << "get disjuncts fail: number of children is not 2";
+            VLOG_CRITICAL << "get disjuncts fail: number of children is not 2";
             return false;
         }
         SlotRef* slotRef;
@@ -408,19 +408,19 @@ bool EsScanNode::get_disjuncts(ExprContext* context, Expr* conjunct,
             slotRef = (SlotRef*)(conjunct->get_child(1));
             op = conjunct->op();
         } else {
-            VLOG(1) << "get disjuncts fail: no SLOT_REF child";
+            VLOG_CRITICAL << "get disjuncts fail: no SLOT_REF child";
             return false;
         }
 
         SlotDescriptor* slot_desc = get_slot_desc(slotRef);
         if (slot_desc == nullptr) {
-            VLOG(1) << "get disjuncts fail: slot_desc is null";
+            VLOG_CRITICAL << "get disjuncts fail: slot_desc is null";
             return false;
         }
 
         TExtLiteral literal;
         if (!to_ext_literal(context, expr, &literal)) {
-            VLOG(1) << "get disjuncts fail: can't get literal, node_type=" << expr->node_type();
+            VLOG_CRITICAL << "get disjuncts fail: can't get literal, node_type=" << expr->node_type();
             return false;
         }
 
@@ -445,7 +445,7 @@ bool EsScanNode::get_disjuncts(ExprContext* context, Expr* conjunct,
 
         TExtLiteral literal;
         if (!to_ext_literal(context, conjunct->get_child(1), &literal)) {
-            VLOG(1) << "get disjuncts fail: can't get literal, node_type="
+            VLOG_CRITICAL << "get disjuncts fail: can't get literal, node_type="
                     << conjunct->get_child(1)->node_type();
             return false;
         }
@@ -496,7 +496,7 @@ bool EsScanNode::get_disjuncts(ExprContext* context, Expr* conjunct,
             TExtLiteral literal;
             if (!to_ext_literal(slot_desc->type().type, const_cast<void*>(iter->get_value()),
                                 &literal)) {
-                VLOG(1) << "get disjuncts fail: can't get literal, node_type="
+                VLOG_CRITICAL << "get disjuncts fail: can't get literal, node_type="
                         << slot_desc->type().type;
                 return false;
             }
@@ -511,7 +511,7 @@ bool EsScanNode::get_disjuncts(ExprContext* context, Expr* conjunct,
         return true;
     } else if (TExprNodeType::COMPOUND_PRED == conjunct->node_type()) {
         if (TExprOpcode::COMPOUND_OR != conjunct->op()) {
-            VLOG(1) << "get disjuncts fail: op is not COMPOUND_OR";
+            VLOG_CRITICAL << "get disjuncts fail: op is not COMPOUND_OR";
             return false;
         }
         if (!get_disjuncts(context, conjunct->get_child(0), disjuncts)) {
@@ -522,7 +522,7 @@ bool EsScanNode::get_disjuncts(ExprContext* context, Expr* conjunct,
         }
         return true;
     } else {
-        VLOG(1) << "get disjuncts fail: node type is " << conjunct->node_type()
+        VLOG_CRITICAL << "get disjuncts fail: node type is " << conjunct->node_type()
                 << ", should be BINARY_PRED or COMPOUND_PRED";
         return false;
     }
@@ -688,7 +688,7 @@ Status EsScanNode::get_next_from_es(TExtGetNextResult& result) {
         }
 
         try {
-            VLOG(1) << "es get_next param=" << apache::thrift::ThriftDebugString(params);
+            VLOG_CRITICAL << "es get_next param=" << apache::thrift::ThriftDebugString(params);
             client->getNext(result, params);
         } catch (apache::thrift::transport::TTransportException& e) {
             std::stringstream ss;
@@ -725,7 +725,7 @@ Status EsScanNode::get_next_from_es(TExtGetNextResult& result) {
 #endif
 
     // check result
-    VLOG(1) << "es get_next result=" << apache::thrift::ThriftDebugString(result);
+    VLOG_CRITICAL << "es get_next result=" << apache::thrift::ThriftDebugString(result);
     Status get_next_status(result.status);
     if (!get_next_status.ok()) {
         LOG(WARNING) << "es get_next error: scan_range_idx=" << _scan_range_idx
diff --git a/be/src/exec/exec_node.cpp b/be/src/exec/exec_node.cpp
index 81c5564..245d42b 100644
--- a/be/src/exec/exec_node.cpp
+++ b/be/src/exec/exec_node.cpp
@@ -330,7 +330,7 @@ Status ExecNode::create_node(RuntimeState* state, ObjectPool* pool, const TPlanN
                              const DescriptorTbl& descs, ExecNode** node) {
     std::stringstream error_msg;
 
-    VLOG(2) << "tnode:\n" << apache::thrift::ThriftDebugString(tnode);
+    VLOG_CRITICAL << "tnode:\n" << apache::thrift::ThriftDebugString(tnode);
     switch (tnode.node_type) {
     case TPlanNodeType::CSV_SCAN_NODE:
         *node = pool->add(new CsvScanNode(pool, tnode, descs));
diff --git a/be/src/exec/json_scanner.cpp b/be/src/exec/json_scanner.cpp
index 4433f17..3431be4 100644
--- a/be/src/exec/json_scanner.cpp
+++ b/be/src/exec/json_scanner.cpp
@@ -122,7 +122,7 @@ Status JsonScanner::open_next_reader() {
     case TFileType::FILE_STREAM: {
         _stream_load_pipe = _state->exec_env()->load_stream_mgr()->get(range.load_id);
         if (_stream_load_pipe == nullptr) {
-            VLOG(3) << "unknown stream load id: " << UniqueId(range.load_id);
+            VLOG_NOTICE << "unknown stream load id: " << UniqueId(range.load_id);
             return Status::InternalError("unknown stream load id");
         }
         file = _stream_load_pipe.get();
diff --git a/be/src/exec/mysql_scan_node.cpp b/be/src/exec/mysql_scan_node.cpp
index 79775ed..9e3c39d 100644
--- a/be/src/exec/mysql_scan_node.cpp
+++ b/be/src/exec/mysql_scan_node.cpp
@@ -41,7 +41,7 @@ MysqlScanNode::MysqlScanNode(ObjectPool* pool, const TPlanNode& tnode, const Des
 MysqlScanNode::~MysqlScanNode() {}
 
 Status MysqlScanNode::prepare(RuntimeState* state) {
-    VLOG(1) << "MysqlScanNode::Prepare";
+    VLOG_CRITICAL << "MysqlScanNode::Prepare";
 
     if (_is_init) {
         return Status::OK();
@@ -99,7 +99,7 @@ Status MysqlScanNode::prepare(RuntimeState* state) {
 
 Status MysqlScanNode::open(RuntimeState* state) {
     RETURN_IF_ERROR(ExecNode::open(state));
-    VLOG(1) << "MysqlScanNode::Open";
+    VLOG_CRITICAL << "MysqlScanNode::Open";
 
     if (NULL == state) {
         return Status::InternalError("input pointer is NULL.");
@@ -144,7 +144,7 @@ Status MysqlScanNode::write_text_slot(char* value, int value_length, SlotDescrip
 }
 
 Status MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) {
-    VLOG(1) << "MysqlScanNode::GetNext";
+    VLOG_CRITICAL << "MysqlScanNode::GetNext";
 
     if (NULL == state || NULL == row_batch || NULL == eos) {
         return Status::InternalError("input is NULL pointer");
diff --git a/be/src/exec/mysql_scanner.cpp b/be/src/exec/mysql_scanner.cpp
index 0b82c4f..84fd58d 100644
--- a/be/src/exec/mysql_scanner.cpp
+++ b/be/src/exec/mysql_scanner.cpp
@@ -51,7 +51,7 @@ Status MysqlScanner::open() {
         return Status::InternalError("mysql init failed.");
     }
 
-    VLOG(1) << "MysqlScanner::Connect";
+    VLOG_CRITICAL << "MysqlScanner::Connect";
 
     if (NULL == mysql_real_connect(_my_conn, _my_param.host.c_str(), _my_param.user.c_str(),
                                    _my_param.passwd.c_str(), _my_param.db.c_str(),
diff --git a/be/src/exec/odbc_scan_node.cpp b/be/src/exec/odbc_scan_node.cpp
index b1d86fd..3e09a8f 100644
--- a/be/src/exec/odbc_scan_node.cpp
+++ b/be/src/exec/odbc_scan_node.cpp
@@ -41,7 +41,7 @@ OdbcScanNode::OdbcScanNode(ObjectPool* pool, const TPlanNode& tnode, const Descr
 OdbcScanNode::~OdbcScanNode() {}
 
 Status OdbcScanNode::prepare(RuntimeState* state) {
-    VLOG(1) << "OdbcScanNode::Prepare";
+    VLOG_CRITICAL << "OdbcScanNode::Prepare";
 
     if (_is_init) {
         return Status::OK();
@@ -90,7 +90,7 @@ Status OdbcScanNode::prepare(RuntimeState* state) {
 
 Status OdbcScanNode::open(RuntimeState* state) {
     RETURN_IF_ERROR(ExecNode::open(state));
-    VLOG(1) << "OdbcScanNode::Open";
+    VLOG_CRITICAL << "OdbcScanNode::Open";
 
     if (NULL == state) {
         return Status::InternalError("input pointer is NULL.");
@@ -123,7 +123,7 @@ Status OdbcScanNode::write_text_slot(char* value, int value_length, SlotDescript
 }
 
 Status OdbcScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) {
-    VLOG(1) << "OdbcScanNode::GetNext";
+    VLOG_CRITICAL << "OdbcScanNode::GetNext";
 
     if (NULL == state || NULL == row_batch || NULL == eos) {
         return Status::InternalError("input is NULL pointer");
diff --git a/be/src/exec/olap_scan_node.cpp b/be/src/exec/olap_scan_node.cpp
index 52635d4..cde915e 100644
--- a/be/src/exec/olap_scan_node.cpp
+++ b/be/src/exec/olap_scan_node.cpp
@@ -172,7 +172,7 @@ Status OlapScanNode::prepare(RuntimeState* state) {
 }
 
 Status OlapScanNode::open(RuntimeState* state) {
-    VLOG(1) << "OlapScanNode::Open";
+    VLOG_CRITICAL << "OlapScanNode::Open";
     SCOPED_TIMER(_runtime_profile->total_time_counter());
     RETURN_IF_CANCELLED(state);
     RETURN_IF_ERROR(ExecNode::open(state));
@@ -331,7 +331,7 @@ Status OlapScanNode::close(RuntimeState* state) {
         scanner->close(state);
     }
 
-    VLOG(1) << "OlapScanNode::close()";
+    VLOG_CRITICAL << "OlapScanNode::close()";
     return ScanNode::close(state);
 }
 
@@ -362,11 +362,11 @@ Status OlapScanNode::set_scan_ranges(const std::vector<TScanRangeParams>& scan_r
 Status OlapScanNode::start_scan(RuntimeState* state) {
     RETURN_IF_CANCELLED(state);
 
-    VLOG(1) << "Eval Const Conjuncts";
+    VLOG_CRITICAL << "Eval Const Conjuncts";
     // 1. Eval const conjuncts to find whether eos = true
     eval_const_conjuncts();
 
-    VLOG(1) << "NormalizeConjuncts";
+    VLOG_CRITICAL << "NormalizeConjuncts";
     // 2. Convert conjuncts to ColumnValueRange in each column, some conjuncts will
     // set eos = true
     RETURN_IF_ERROR(normalize_conjuncts());
@@ -376,19 +376,19 @@ Status OlapScanNode::start_scan(RuntimeState* state) {
         return Status::OK();
     }
 
-    VLOG(1) << "BuildOlapFilters";
+    VLOG_CRITICAL << "BuildOlapFilters";
     // 3. Using ColumnValueRange to Build StorageEngine filters
     RETURN_IF_ERROR(build_olap_filters());
 
-    VLOG(1) << "Filter idle conjuncts";
+    VLOG_CRITICAL << "Filter idle conjuncts";
     // 4. Filter idle conjunct which already trans to olap filters`
     remove_pushed_conjuncts(state);
 
-    VLOG(1) << "BuildScanKey";
+    VLOG_CRITICAL << "BuildScanKey";
     // 5. Using `Key Column`'s ColumnValueRange to split ScanRange to several `Sub ScanRange`
     RETURN_IF_ERROR(build_scan_key());
 
-    VLOG(1) << "StartScanThread";
+    VLOG_CRITICAL << "StartScanThread";
     // 6. Start multi thread to read several `Sub Sub ScanRange`
     RETURN_IF_ERROR(start_scan_thread(state));
 
@@ -526,7 +526,7 @@ Status OlapScanNode::normalize_conjuncts() {
         }
 
         default: {
-            VLOG(2) << "Unsupported Normalize Slot [ColName=" << slots[slot_idx]->col_name() << "]";
+            VLOG_CRITICAL << "Unsupported Normalize Slot [ColName=" << slots[slot_idx]->col_name() << "]";
             break;
         }
         }
@@ -575,7 +575,7 @@ Status OlapScanNode::build_scan_key() {
         RETURN_IF_ERROR(boost::apply_visitor(visitor, column_range_iter->second));
     }
 
-    VLOG(1) << _scan_keys.debug_string();
+    VLOG_CRITICAL << _scan_keys.debug_string();
 
     return Status::OK();
 }
@@ -723,7 +723,6 @@ Status OlapScanNode::start_scan_thread(RuntimeState* state) {
     std::stringstream ss;
     ss << "ScanThread complete (node=" << id() << "):";
     _progress = ProgressUpdater(ss.str(), _olap_scanners.size(), 1);
-    _progress.set_logging_level(1);
 
     _transfer_thread.add_thread(new boost::thread(&OlapScanNode::transfer_thread, this, state));
 
@@ -787,7 +786,7 @@ bool OlapScanNode::should_push_down_in_predicate(doris::SlotDescriptor *slot, do
         }
     }
 
-    VLOG(1) << slot->col_name() << " fixed_values add num: " << pred->hybrid_set()->size();
+    VLOG_CRITICAL << slot->col_name() << " fixed_values add num: " << pred->hybrid_set()->size();
 
     // if there are too many elements in InPredicate, exceed the limit,
     // we will not push any condition of this column to storage engine.
@@ -796,7 +795,7 @@ bool OlapScanNode::should_push_down_in_predicate(doris::SlotDescriptor *slot, do
     // ATTN: This is just an experience value. You may need to try
     // different thresholds to improve performance.
     if (pred->hybrid_set()->size() > _max_pushdown_conditions_per_column) {
-        VLOG(3) << "Predicate value num " << pred->hybrid_set()->size() << " exceed limit "
+        VLOG_NOTICE << "Predicate value num " << pred->hybrid_set()->size() << " exceed limit "
                 << _max_pushdown_conditions_per_column;
         return false;
     }
@@ -1101,7 +1100,7 @@ Status OlapScanNode::normalize_noneq_binary_predicate(SlotDescriptor* slot,
                     filter_conjuncts_index.emplace_back(conj_idx);
                 }
 
-                VLOG(1) << slot->col_name() << " op: "
+                VLOG_CRITICAL << slot->col_name() << " op: "
                         << static_cast<int>(to_olap_filter_type(pred->op(), child_idx))
                         << " value: " << *reinterpret_cast<T*>(value);
             }
@@ -1249,7 +1248,7 @@ void OlapScanNode::transfer_thread(RuntimeState* state) {
     }
 
     state->resource_pool()->release_thread_token(true);
-    VLOG(1) << "TransferThread finish.";
+    VLOG_CRITICAL << "TransferThread finish.";
     {
         std::unique_lock<std::mutex> l(_row_batches_lock);
         _transfer_done = true;
@@ -1258,7 +1257,7 @@ void OlapScanNode::transfer_thread(RuntimeState* state) {
 
     std::unique_lock<std::mutex> l(_scan_batches_lock);
     _scan_thread_exit_cv.wait(l, [this] { return _running_thread == 0; });
-    VLOG(1) << "Scanner threads have been exited. TransferThread exit.";
+    VLOG_CRITICAL << "Scanner threads have been exited. TransferThread exit.";
 }
 
 void OlapScanNode::scanner_thread(OlapScanner* scanner) {
@@ -1387,7 +1386,7 @@ Status OlapScanNode::add_one_batch(RowBatchInterface* row_batch) {
             _row_batch_consumed_cv.wait(l);
         }
 
-        VLOG(2) << "Push row_batch to materialized_row_batches";
+        VLOG_CRITICAL << "Push row_batch to materialized_row_batches";
         _materialized_row_batches.push_back(row_batch);
     }
     // remove one batch, notify main thread
diff --git a/be/src/exec/olap_scan_node.h b/be/src/exec/olap_scan_node.h
index b677c86..728e088 100644
--- a/be/src/exec/olap_scan_node.h
+++ b/be/src/exec/olap_scan_node.h
@@ -131,7 +131,7 @@ protected:
             h.pop();
         }
 
-        VLOG(1) << s.str() << "\n]";
+        VLOG_CRITICAL << s.str() << "\n]";
     }
 
     // In order to ensure the accuracy of the query result
diff --git a/be/src/exec/olap_scanner.cpp b/be/src/exec/olap_scanner.cpp
index 96c82b5..50e5430 100644
--- a/be/src/exec/olap_scanner.cpp
+++ b/be/src/exec/olap_scanner.cpp
@@ -354,7 +354,7 @@ Status OlapScanner::get_batch(RuntimeState* state, RowBatch* batch, bool* eof) {
                         if (pushdown_return_rate >
                             config::doris_max_pushdown_conjuncts_return_rate) {
                             _use_pushdown_conjuncts = false;
-                            VLOG(2) << "Stop Using PushDown Conjuncts. "
+                            VLOG_CRITICAL << "Stop Using PushDown Conjuncts. "
                                     << "PushDownReturnRate: " << pushdown_return_rate << "%"
                                     << " MaxPushDownReturnRate: "
                                     << config::doris_max_pushdown_conjuncts_return_rate << "%";
diff --git a/be/src/exec/olap_utils.h b/be/src/exec/olap_utils.h
index e08debf..3f56fed 100644
--- a/be/src/exec/olap_utils.h
+++ b/be/src/exec/olap_utils.h
@@ -208,7 +208,7 @@ inline SQLFilterOp to_olap_filter_type(TExprOpcode::type type, bool opposite) {
         return opposite ? FILTER_IN : FILTER_NOT_IN;
 
     default:
-        VLOG(1) << "TExprOpcode: " << type;
+        VLOG_CRITICAL << "TExprOpcode: " << type;
         DCHECK(false);
     }
 
diff --git a/be/src/exec/parquet_reader.cpp b/be/src/exec/parquet_reader.cpp
index 27a486d..3472ad7 100644
--- a/be/src/exec/parquet_reader.cpp
+++ b/be/src/exec/parquet_reader.cpp
@@ -173,7 +173,7 @@ inline Status ParquetReaderWrap::set_field_null(Tuple* tuple, const SlotDescript
 Status ParquetReaderWrap::read_record_batch(const std::vector<SlotDescriptor*>& tuple_slot_descs,
                                             bool* eof) {
     if (_current_line_of_group >= _rows_of_group) { // read next row group
-        VLOG(7) << "read_record_batch, current group id:" << _current_group
+        VLOG_DEBUG << "read_record_batch, current group id:" << _current_group
                 << " current line of group:" << _current_line_of_group
                 << " is larger than rows group size:" << _rows_of_group
                 << ". start to read next row group";
@@ -198,7 +198,7 @@ Status ParquetReaderWrap::read_record_batch(const std::vector<SlotDescriptor*>&
         }
         _current_line_of_batch = 0;
     } else if (_current_line_of_batch >= _batch->num_rows()) {
-        VLOG(7) << "read_record_batch, current group id:" << _current_group
+        VLOG_DEBUG << "read_record_batch, current group id:" << _current_group
                 << " current line of batch:" << _current_line_of_batch
                 << " is larger than batch size:" << _batch->num_rows()
                 << ". start to read next batch";
diff --git a/be/src/exec/partitioned_aggregation_node.cc b/be/src/exec/partitioned_aggregation_node.cc
index 35cf67b..bb74cdf 100644
--- a/be/src/exec/partitioned_aggregation_node.cc
+++ b/be/src/exec/partitioned_aggregation_node.cc
@@ -1425,7 +1425,7 @@ Status PartitionedAggregationNode::MoveHashPartitions(int64_t num_input_rows) {
             aggregated_partitions_.push_back(partition);
         }
     }
-    VLOG(2) << ss.str();
+    VLOG_CRITICAL << ss.str();
     hash_partitions_.clear();
     return Status::OK();
 }
diff --git a/be/src/exec/partitioned_hash_table.cc b/be/src/exec/partitioned_hash_table.cc
index a50bf8f..97b36e8 100644
--- a/be/src/exec/partitioned_hash_table.cc
+++ b/be/src/exec/partitioned_hash_table.cc
@@ -448,7 +448,7 @@ void PartitionedHashTable::Close() {
     const int64_t LARGE_HT = 128 * 1024;
     const int64_t HEAVILY_USED = 1024 * 1024;
     // TODO: These statistics should go to the runtime profile as well.
-    if ((num_buckets_ > LARGE_HT) || (num_probes_ > HEAVILY_USED)) VLOG(2) << PrintStats();
+    if ((num_buckets_ > LARGE_HT) || (num_probes_ > HEAVILY_USED)) VLOG_CRITICAL << PrintStats();
     for (auto& data_page : data_pages_) allocator_->Free(move(data_page));
     data_pages_.clear();
     if (bucket_allocation_ != nullptr) allocator_->Free(move(bucket_allocation_));
@@ -474,7 +474,7 @@ Status PartitionedHashTable::ResizeBuckets(int64_t num_buckets,
     DCHECK_GT(num_buckets, num_filled_buckets_)
             << "Cannot shrink the hash table to smaller number of buckets than the number of "
             << "filled buckets.";
-    VLOG(2) << "Resizing hash table from " << num_buckets_ << " to " << num_buckets << " buckets.";
+    VLOG_CRITICAL << "Resizing hash table from " << num_buckets_ << " to " << num_buckets << " buckets.";
     if (max_num_buckets_ != -1 && num_buckets > max_num_buckets_) {
         *got_memory = false;
         return Status::OK();
diff --git a/be/src/exec/tablet_sink.cpp b/be/src/exec/tablet_sink.cpp
index 1567721..a8f84b5 100644
--- a/be/src/exec/tablet_sink.cpp
+++ b/be/src/exec/tablet_sink.cpp
@@ -260,7 +260,7 @@ Status NodeChannel::close_wait(RuntimeState* state) {
         SleepFor(MonoDelta::FromMilliseconds(1));
     }
     timer.stop();
-    VLOG(1) << name() << " close_wait cost: " << timer.elapsed_time() / 1000000 << " ms";
+    VLOG_CRITICAL << name() << " close_wait cost: " << timer.elapsed_time() / 1000000 << " ms";
 
     if (_add_batches_finished) {
         {
diff --git a/be/src/exprs/json_functions.cpp b/be/src/exprs/json_functions.cpp
index 70f6cc3..ea37835 100644
--- a/be/src/exprs/json_functions.cpp
+++ b/be/src/exprs/json_functions.cpp
@@ -114,7 +114,7 @@ rapidjson::Value* JsonFunctions::match_value(const std::vector<JsonPath>& parsed
     rapidjson::Value* root = document;
     rapidjson::Value* array_obj = nullptr;
     for (int i = 1; i < parsed_paths.size(); i++) {
-        VLOG(10) << "parsed_paths: " << parsed_paths[i].debug_string();
+        VLOG_TRACE << "parsed_paths: " << parsed_paths[i].debug_string();
 
         if (root == nullptr || root->IsNull()) {
             return nullptr;
@@ -238,7 +238,7 @@ rapidjson::Value* JsonFunctions::get_json_object(FunctionContext* context,
     parsed_paths = &tmp_parsed_paths;
 #endif
 
-    VLOG(10) << "first parsed path: " << (*parsed_paths)[0].debug_string();
+    VLOG_TRACE << "first parsed path: " << (*parsed_paths)[0].debug_string();
 
     if (!(*parsed_paths)[0].is_valid) {
         return document;
@@ -255,7 +255,7 @@ rapidjson::Value* JsonFunctions::get_json_object(FunctionContext* context,
     //rapidjson::Document document;
     document->Parse(json_string.c_str());
     if (UNLIKELY(document->HasParseError())) {
-        VLOG(1) << "Error at offset " << document->GetErrorOffset() << ": "
+        VLOG_CRITICAL << "Error at offset " << document->GetErrorOffset() << ": "
                 << GetParseError_En(document->GetParseError());
         document->SetNull();
         return document;
@@ -327,7 +327,7 @@ void JsonFunctions::json_path_prepare(doris_udf::FunctionContext* context,
     get_parsed_paths(path_exprs, parsed_paths);
 
     context->set_function_state(scope, parsed_paths);
-    VLOG(10) << "prepare json path. size: " << parsed_paths->size();
+    VLOG_TRACE << "prepare json path. size: " << parsed_paths->size();
 }
 
 void JsonFunctions::json_path_close(doris_udf::FunctionContext* context,
@@ -339,7 +339,7 @@ void JsonFunctions::json_path_close(doris_udf::FunctionContext* context,
             reinterpret_cast<std::vector<JsonPath>*>(context->get_function_state(scope));
     if (parsed_paths != nullptr) {
         delete parsed_paths;
-        VLOG(10) << "close json path";
+        VLOG_TRACE << "close json path";
     }
 }
 
diff --git a/be/src/exprs/timestamp_functions.cpp b/be/src/exprs/timestamp_functions.cpp
index cefd2eb..6a6d9b0 100644
--- a/be/src/exprs/timestamp_functions.cpp
+++ b/be/src/exprs/timestamp_functions.cpp
@@ -457,7 +457,7 @@ void TimestampFunctions::format_prepare(doris_udf::FunctionContext* context,
     if (scope != FunctionContext::FRAGMENT_LOCAL || context->get_num_args() < 2 ||
         context->get_arg_type(1)->type != doris_udf::FunctionContext::Type::TYPE_VARCHAR ||
         !context->is_arg_constant(1)) {
-        VLOG(10) << "format_prepare returned";
+        VLOG_TRACE << "format_prepare returned";
         return;
     }
 
diff --git a/be/src/gutil/atomicops-internals-x86.cc b/be/src/gutil/atomicops-internals-x86.cc
index 68b9704..26c6639 100644
--- a/be/src/gutil/atomicops-internals-x86.cc
+++ b/be/src/gutil/atomicops-internals-x86.cc
@@ -95,7 +95,7 @@ static void AtomicOps_Internalx86CPUFeaturesInit() {
     // ecx bit 13 indicates whether the cmpxchg16b instruction is supported
     GutilAtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1);
 
-    VLOG(1) << "vendor " << vendor << "  family " << family << "  model " << model << "  sse2 "
+    VLOG_CRITICAL << "vendor " << vendor << "  family " << family << "  model " << model << "  sse2 "
             << GutilAtomicOps_Internalx86CPUFeatures.has_sse2 << "  cmpxchg16b "
             << GutilAtomicOps_Internalx86CPUFeatures.has_cmpxchg16b;
 }
diff --git a/be/src/gutil/strings/split.cc b/be/src/gutil/strings/split.cc
index 0e86563..62a6d33 100644
--- a/be/src/gutil/strings/split.cc
+++ b/be/src/gutil/strings/split.cc
@@ -906,7 +906,7 @@ bool SplitStringIntoKeyValues(const string& line, const string& key_value_delimi
     // find the key string
     size_t end_key_pos = line.find_first_of(key_value_delimiters);
     if (end_key_pos == string::npos) {
-        VLOG(1) << "cannot parse key from line: " << line;
+        VLOG_CRITICAL << "cannot parse key from line: " << line;
         return false; // no key
     }
     key->assign(line, 0, end_key_pos);
@@ -915,7 +915,7 @@ bool SplitStringIntoKeyValues(const string& line, const string& key_value_delimi
     string remains(line, end_key_pos, line.size() - end_key_pos);
     size_t begin_values_pos = remains.find_first_not_of(key_value_delimiters);
     if (begin_values_pos == string::npos) {
-        VLOG(1) << "cannot parse value from line: " << line;
+        VLOG_CRITICAL << "cannot parse value from line: " << line;
         return false; // no value
     }
     string values_string(remains, begin_values_pos, remains.size() - begin_values_pos);
@@ -926,7 +926,7 @@ bool SplitStringIntoKeyValues(const string& line, const string& key_value_delimi
     } else { // multiple values
         SplitStringUsing(values_string, value_value_delimiters.c_str(), values);
         if (values->size() < 1) {
-            VLOG(1) << "cannot parse value from line: " << line;
+            VLOG_CRITICAL << "cannot parse value from line: " << line;
             return false; // no value
         }
     }
diff --git a/be/src/http/action/mini_load.cpp b/be/src/http/action/mini_load.cpp
index 879bc34..ea234a0 100644
--- a/be/src/http/action/mini_load.cpp
+++ b/be/src/http/action/mini_load.cpp
@@ -831,7 +831,7 @@ Status MiniLoadAction::_process_put(HttpRequest* req, StreamLoadContext* ctx) {
                      << ctx->brief();
         return plan_status;
     }
-    VLOG(3) << "params is " << apache::thrift::ThriftDebugString(ctx->put_result.params);
+    VLOG_NOTICE << "params is " << apache::thrift::ThriftDebugString(ctx->put_result.params);
     return Status::OK();
 }
 
diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp
index 1c9b7c3..47bfdb3 100644
--- a/be/src/http/action/stream_load.cpp
+++ b/be/src/http/action/stream_load.cpp
@@ -476,7 +476,7 @@ Status StreamLoadAction::_process_put(HttpRequest* http_req, StreamLoadContext*
                      << ctx->brief();
         return plan_status;
     }
-    VLOG(3) << "params is " << apache::thrift::ThriftDebugString(ctx->put_result.params);
+    VLOG_NOTICE << "params is " << apache::thrift::ThriftDebugString(ctx->put_result.params);
     // if we not use streaming, we must download total content before we begin
     // to process this load
     if (!ctx->use_streaming) {
diff --git a/be/src/http/utils.cpp b/be/src/http/utils.cpp
index 42c1e4d..d43c02b 100644
--- a/be/src/http/utils.cpp
+++ b/be/src/http/utils.cpp
@@ -89,7 +89,7 @@ bool parse_basic_auth(const HttpRequest& req, AuthInfo* auth) {
 // Do a simple decision, only deal a few type
 std::string get_content_type(const std::string& file_name) {
     std::string file_ext = path_util::file_extension(file_name);
-    VLOG(10) << "file_name: " << file_name << "; file extension: [" << file_ext << "]";
+    VLOG_TRACE << "file_name: " << file_name << "; file extension: [" << file_ext << "]";
     if (file_ext == std::string(".html") || file_ext == std::string(".htm")) {
         return std::string("text/html; charset=utf-8");
     } else if (file_ext == std::string(".js")) {
diff --git a/be/src/http/web_page_handler.cpp b/be/src/http/web_page_handler.cpp
index c4c01c0..98eb9fc 100644
--- a/be/src/http/web_page_handler.cpp
+++ b/be/src/http/web_page_handler.cpp
@@ -82,7 +82,7 @@ void WebPageHandler::register_page(const std::string& path, const string& alias,
 }
 
 void WebPageHandler::handle(HttpRequest* req) {
-    VLOG(10) << req->debug_string();
+    VLOG_TRACE << req->debug_string();
     PathHandler* handler = nullptr;
     {
         boost::mutex::scoped_lock lock(_map_lock);
diff --git a/be/src/olap/cumulative_compaction.cpp b/be/src/olap/cumulative_compaction.cpp
index 7f084b9..dfa9589 100644
--- a/be/src/olap/cumulative_compaction.cpp
+++ b/be/src/olap/cumulative_compaction.cpp
@@ -45,7 +45,7 @@ OLAPStatus CumulativeCompaction::prepare_compact() {
     // 1.calculate cumulative point
     _tablet->calculate_cumulative_point();
     TRACE("calculated cumulative point");
-    VLOG(1) << "after calculate, current cumulative point is " << _tablet->cumulative_layer_point()
+    VLOG_CRITICAL << "after calculate, current cumulative point is " << _tablet->cumulative_layer_point()
             << ", tablet=" << _tablet->full_name();
 
     // 2. pick rowsets to compact
diff --git a/be/src/olap/cumulative_compaction_policy.cpp b/be/src/olap/cumulative_compaction_policy.cpp
index c29ab06..1f026ab 100644
--- a/be/src/olap/cumulative_compaction_policy.cpp
+++ b/be/src/olap/cumulative_compaction_policy.cpp
@@ -98,7 +98,7 @@ void SizeBasedCumulativeCompactionPolicy::calculate_cumulative_point(
         prev_version = rs->version().second;
         *ret_cumulative_point = prev_version + 1;
     }
-    VLOG(3) << "cumulative compaction size_based policy, calculate cumulative point value = "
+    VLOG_NOTICE << "cumulative compaction size_based policy, calculate cumulative point value = "
             << *ret_cumulative_point << ", calc promotion size value = " << promotion_size
             << " tablet = " << tablet->full_name();
 }
@@ -268,7 +268,7 @@ int SizeBasedCumulativeCompactionPolicy::pick_input_rowsets(
         rs_iter = input_rowsets->erase(rs_iter);
     }
 
-    VLOG(1) << "cumulative compaction size_based policy, compaction_score = " << *compaction_score
+    VLOG_CRITICAL << "cumulative compaction size_based policy, compaction_score = " << *compaction_score
             << ", total_size = " << total_size << ", calc promotion size value = " << promotion_size
             << ", tablet = " << tablet->full_name() << ", input_rowset size "
             << input_rowsets->size();
diff --git a/be/src/olap/delete_handler.cpp b/be/src/olap/delete_handler.cpp
index 06bd087..94fca29 100644
--- a/be/src/olap/delete_handler.cpp
+++ b/be/src/olap/delete_handler.cpp
@@ -210,7 +210,7 @@ bool DeleteHandler::_parse_condition(const std::string& condition_str, TConditio
             matched = false;
         }
     } catch (regex_error& e) {
-        VLOG(3) << "fail to parse expr. [expr=" << condition_str << "; error=" << e.what() << "]";
+        VLOG_NOTICE << "fail to parse expr. [expr=" << condition_str << "; error=" << e.what() << "]";
         matched = false;
     }
 
diff --git a/be/src/olap/delta_writer.cpp b/be/src/olap/delta_writer.cpp
index ae62704..5339a47 100644
--- a/be/src/olap/delta_writer.cpp
+++ b/be/src/olap/delta_writer.cpp
@@ -199,7 +199,7 @@ OLAPStatus DeltaWriter::_flush_memtable_async() {
 OLAPStatus DeltaWriter::flush_memtable_and_wait() {
     if (mem_consumption() == _mem_table->memory_usage()) {
         // equal means there is no memtable in flush queue, just flush this memtable
-        VLOG(3) << "flush memtable to reduce mem consumption. memtable size: "
+        VLOG_NOTICE << "flush memtable to reduce mem consumption. memtable size: "
                 << _mem_table->memory_usage() << ", tablet: " << _req.tablet_id
                 << ", load id: " << print_id(_req.load_id);
         RETURN_NOT_OK(_flush_memtable_async());
diff --git a/be/src/olap/file_helper.cpp b/be/src/olap/file_helper.cpp
index 35cc806..2ffd8f1 100644
--- a/be/src/olap/file_helper.cpp
+++ b/be/src/olap/file_helper.cpp
@@ -80,7 +80,7 @@ OLAPStatus FileHandler::open(const string& file_name, int flag) {
         return OLAP_ERR_IO_ERROR;
     }
 
-    VLOG(3) << "success to open file. file_name=" << file_name << ", mode=" << flag
+    VLOG_NOTICE << "success to open file. file_name=" << file_name << ", mode=" << flag
             << " fd=" << _fd;
     _is_using_cache = false;
     _file_name = file_name;
@@ -106,7 +106,7 @@ OLAPStatus FileHandler::open_with_cache(const string& file_name, int flag) {
         FileDescriptor* file_desc =
                 reinterpret_cast<FileDescriptor*>(_s_fd_cache->value(_cache_handle));
         _fd = file_desc->fd;
-        VLOG(3) << "success to open file with cache. file_name=" << file_name << ", mode=" << flag
+        VLOG_NOTICE << "success to open file with cache. file_name=" << file_name << ", mode=" << flag
                 << " fd=" << _fd;
     } else {
         _fd = ::open(file_name.c_str(), flag);
@@ -121,7 +121,7 @@ OLAPStatus FileHandler::open_with_cache(const string& file_name, int flag) {
         }
         FileDescriptor* file_desc = new FileDescriptor(_fd);
         _cache_handle = _s_fd_cache->insert(key, file_desc, 1, &_delete_cache_file_descriptor);
-        VLOG(3) << "success to open file with cache. "
+        VLOG_NOTICE << "success to open file with cache. "
                 << "file_name=" << file_name << ", mode=" << flag << ", fd=" << _fd;
     }
     _is_using_cache = true;
@@ -150,7 +150,7 @@ OLAPStatus FileHandler::open_with_mode(const string& file_name, int flag, int mo
         return OLAP_ERR_IO_ERROR;
     }
 
-    VLOG(3) << "success to open file. file_name=" << file_name << ", mode=" << mode
+    VLOG_NOTICE << "success to open file. file_name=" << file_name << ", mode=" << mode
             << ", fd=" << _fd;
     _file_name = file_name;
     return OLAP_SUCCESS;
@@ -188,7 +188,7 @@ OLAPStatus FileHandler::close() {
         }
     }
 
-    VLOG(3) << "finished to close file. "
+    VLOG_NOTICE << "finished to close file. "
             << "file_name=" << _file_name << ", fd=" << _fd;
     _fd = -1;
     _file_name = "";
@@ -326,7 +326,7 @@ OLAPStatus FileHandlerWithBuf::open(const string& file_name, const char* mode) {
         return OLAP_ERR_IO_ERROR;
     }
 
-    VLOG(3) << "success to open file. "
+    VLOG_NOTICE << "success to open file. "
             << "file_name=" << file_name << ", mode=" << mode;
     _file_name = file_name;
     return OLAP_SUCCESS;
diff --git a/be/src/olap/file_helper.h b/be/src/olap/file_helper.h
index 4d714fc..e89cbd3 100644
--- a/be/src/olap/file_helper.h
+++ b/be/src/olap/file_helper.h
@@ -325,7 +325,7 @@ OLAPStatus FileHeader<MessageType, ExtraType, FileHandlerType>::unserialize(
     }
 
     if (_fixed_file_header.magic_number != OLAP_FIX_HEADER_MAGIC_NUMBER) {
-        VLOG(10) << "old fix header found, magic num=" << _fixed_file_header.magic_number;
+        VLOG_TRACE << "old fix header found, magic num=" << _fixed_file_header.magic_number;
         FixedFileHeader tmp_header;
 
         if (OLAP_SUCCESS != file_handler->pread(&tmp_header, sizeof(tmp_header), 0)) {
@@ -346,7 +346,7 @@ OLAPStatus FileHeader<MessageType, ExtraType, FileHandlerType>::unserialize(
         _fixed_file_header_size = sizeof(tmp_header);
     }
 
-    VLOG(3) << "fix head loaded. file_length=" << _fixed_file_header.file_length
+    VLOG_NOTICE << "fix head loaded. file_length=" << _fixed_file_header.file_length
             << ", checksum=" << _fixed_file_header.checksum
             << ", protobuf_length=" << _fixed_file_header.protobuf_length
             << ", magic_number=" << _fixed_file_header.magic_number
diff --git a/be/src/olap/file_stream.cpp b/be/src/olap/file_stream.cpp
index 2774105..1108e79 100644
--- a/be/src/olap/file_stream.cpp
+++ b/be/src/olap/file_stream.cpp
@@ -51,7 +51,7 @@ OLAPStatus ReadOnlyFileStream::_assure_data() {
     if (OLAP_LIKELY(_uncompressed != NULL && _uncompressed->remaining() > 0)) {
         return OLAP_SUCCESS;
     } else if (_file_cursor.eof()) {
-        VLOG(10) << "STREAM EOF. length=" << _file_cursor.length()
+        VLOG_TRACE << "STREAM EOF. length=" << _file_cursor.length()
                  << ", used=" << _file_cursor.position();
         return OLAP_ERR_COLUMN_STREAM_EOF;
     }
@@ -119,7 +119,7 @@ OLAPStatus ReadOnlyFileStream::seek(PositionProvider* position) {
         if (OLAP_LIKELY(OLAP_SUCCESS == res)) {
             // assure data will be successful in most case
         } else if (res == OLAP_ERR_COLUMN_STREAM_EOF) {
-            VLOG(10) << "file stream eof.";
+            VLOG_TRACE << "file stream eof.";
             return res;
         } else {
             OLAP_LOG_WARNING("fail to assure data after seek");
diff --git a/be/src/olap/fs/file_block_manager.cpp b/be/src/olap/fs/file_block_manager.cpp
index e29e3c9..37056f4 100644
--- a/be/src/olap/fs/file_block_manager.cpp
+++ b/be/src/olap/fs/file_block_manager.cpp
@@ -175,7 +175,7 @@ Status FileWritableBlock::appendv(const Slice* data, size_t data_cnt) {
 }
 
 Status FileWritableBlock::flush_data_async() {
-    VLOG(3) << "Flushing block " << _path;
+    VLOG_NOTICE << "Flushing block " << _path;
     RETURN_IF_ERROR(_writer->flush(WritableFile::FLUSH_ASYNC));
     return Status::OK();
 }
@@ -187,7 +187,7 @@ Status FileWritableBlock::finalize() {
     if (_state == FINALIZED) {
         return Status::OK();
     }
-    VLOG(3) << "Finalizing block " << _path;
+    VLOG_NOTICE << "Finalizing block " << _path;
     if (_state == DIRTY && BlockManager::block_manager_preflush_control == "finalize") {
         flush_data_async();
     }
@@ -211,7 +211,7 @@ Status FileWritableBlock::_close(SyncMode mode) {
     Status sync;
     if (mode == SYNC && (_state == CLEAN || _state == DIRTY || _state == FINALIZED)) {
         // Safer to synchronize data first, then metadata.
-        VLOG(3) << "Syncing block " << _path;
+        VLOG_NOTICE << "Syncing block " << _path;
         if (_block_manager->_metrics) {
             _block_manager->_metrics->total_disk_sync->increment(1);
         }
@@ -399,14 +399,14 @@ Status FileBlockManager::create_block(const CreateBlockOptions& opts,
     wr_opts.mode = Env::MUST_CREATE;
     RETURN_IF_ERROR(env_util::open_file_for_write(wr_opts, _env, opts.path, &writer));
 
-    VLOG(1) << "Creating new block at " << opts.path;
+    VLOG_CRITICAL << "Creating new block at " << opts.path;
     block->reset(new internal::FileWritableBlock(this, opts.path, writer));
     return Status::OK();
 }
 
 Status FileBlockManager::open_block(const std::string& path,
                                     std::unique_ptr<ReadableBlock>* block) {
-    VLOG(1) << "Opening block with path at " << path;
+    VLOG_CRITICAL << "Opening block with path at " << path;
     std::shared_ptr<OpenedFileHandle<RandomAccessFile>> file_handle(
             new OpenedFileHandle<RandomAccessFile>());
     bool found = _file_cache->lookup(path, file_handle.get());
diff --git a/be/src/olap/lru_cache.cpp b/be/src/olap/lru_cache.cpp
index 6fc79f1..6e795be 100644
--- a/be/src/olap/lru_cache.cpp
+++ b/be/src/olap/lru_cache.cpp
@@ -461,7 +461,7 @@ void ShardedLRUCache::prune() {
     for (int s = 0; s < kNumShards; s++) {
         num_prune += _shards[s].prune();
     }
-    VLOG(7) << "Successfully prune cache, clean " << num_prune << " entries.";
+    VLOG_DEBUG << "Successfully prune cache, clean " << num_prune << " entries.";
 }
 
 void ShardedLRUCache::update_cache_metrics() const {
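
Taken together, the replacements up to this point fix the complete four-level mapping this commit applies everywhere: VLOG(1) -> VLOG_CRITICAL, VLOG(3) -> VLOG_NOTICE, VLOG(7) -> VLOG_DEBUG, and VLOG(10) -> VLOG_TRACE. As a minimal sketch of what the standardized macros presumably expand to -- inferred purely from the paired -/+ lines above, not copied from the backend's common logging header, which holds the authoritative definitions:

    // Sketch only: mapping inferred from the -/+ pairs in this diff.
    // glog treats lower verbosity levels as more important, which is
    // why VLOG_CRITICAL maps to the lowest number.
    #define VLOG_CRITICAL VLOG(1)
    #define VLOG_NOTICE   VLOG(3)
    #define VLOG_DEBUG    VLOG(7)
    #define VLOG_TRACE    VLOG(10)

Because each macro expands to a plain VLOG(n) call, every hunk in this commit is behavior-preserving: each message keeps its original verbosity level, and only the spelling at the call site changes.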
diff --git a/be/src/olap/olap_cond.cpp b/be/src/olap/olap_cond.cpp
index 4508e2e..231da7b 100644
--- a/be/src/olap/olap_cond.cpp
+++ b/be/src/olap/olap_cond.cpp
@@ -623,7 +623,7 @@ bool Conditions::delete_conditions_eval(const RowCursor& row) const {
         }
     }
 
-    VLOG(3) << "Row meets the delete conditions. "
+    VLOG_NOTICE << "Row meets the delete conditions. "
             << "condition_count=" << _columns.size() << ", row=" << row.to_string();
     return true;
 }
diff --git a/be/src/olap/olap_index.cpp b/be/src/olap/olap_index.cpp
index 94a9b8a..e4adb74 100644
--- a/be/src/olap/olap_index.cpp
+++ b/be/src/olap/olap_index.cpp
@@ -358,8 +358,8 @@ const OLAPIndexOffset MemIndex::find(const RowCursor& k, RowCursor* helper_curso
         }
 
         offset.offset = *it;
-        VLOG(3) << "show real offset iterator value. off=" << *it;
-        VLOG(3) << "show result offset. seg_off=" << offset.segment << ", off=" << offset.offset;
+        VLOG_NOTICE << "show real offset iterator value. off=" << *it;
+        VLOG_NOTICE << "show result offset. seg_off=" << offset.segment << ", off=" << offset.offset;
     } catch (...) {
         OLAP_LOG_WARNING("fail to compare value in memindex. [cursor='%s' find_last=%d]",
                          k.to_string().c_str(), find_last);
diff --git a/be/src/olap/olap_snapshot_converter.cpp b/be/src/olap/olap_snapshot_converter.cpp
index 6eed350..bb6353a 100644
--- a/be/src/olap/olap_snapshot_converter.cpp
+++ b/be/src/olap/olap_snapshot_converter.cpp
@@ -200,7 +200,7 @@ OLAPStatus OlapSnapshotConverter::to_tablet_meta_pb(const OLAPHeaderMessage& ola
     }
     tablet_meta_pb->set_tablet_state(TabletStatePB::PB_RUNNING);
     *(tablet_meta_pb->mutable_tablet_uid()) = TabletUid::gen_uid().to_proto();
-    VLOG(3) << "convert tablet meta tablet id = " << olap_header.tablet_id()
+    VLOG_NOTICE << "convert tablet meta tablet id = " << olap_header.tablet_id()
             << " schema hash = " << olap_header.schema_hash() << " successfully.";
     return OLAP_SUCCESS;
 }
diff --git a/be/src/olap/out_stream.cpp b/be/src/olap/out_stream.cpp
index 9d8c940..8882085 100644
--- a/be/src/olap/out_stream.cpp
+++ b/be/src/olap/out_stream.cpp
@@ -344,7 +344,7 @@ OLAPStatus OutStream::write_to_file(FileHandler* file_handle, uint32_t write_mby
 
     for (std::vector<StorageByteBuffer*>::const_iterator it = _output_buffers.begin();
          it != _output_buffers.end(); ++it) {
-        VLOG(10) << "write stream begin:" << file_handle->tell();
+        VLOG_TRACE << "write stream begin:" << file_handle->tell();
 
         res = file_handle->write((*it)->array(), (*it)->limit());
         if (OLAP_SUCCESS != res) {
@@ -352,14 +352,14 @@ OLAPStatus OutStream::write_to_file(FileHandler* file_handle, uint32_t write_mby
             return res;
         }
 
-        VLOG(10) << "write stream end:" << file_handle->tell();
+        VLOG_TRACE << "write stream end:" << file_handle->tell();
 
         total_stream_len += (*it)->limit();
         if (write_mbytes_per_sec > 0) {
             uint64_t delta_time_us = speed_limit_watch.get_elapse_time_us();
             int64_t sleep_time = total_stream_len / write_mbytes_per_sec - delta_time_us;
             if (sleep_time > 0) {
-                VLOG(10) << "sleep to limit merge speed. time=" << sleep_time
+                VLOG_TRACE << "sleep to limit merge speed. time=" << sleep_time
                          << ", bytes=" << total_stream_len;
                 SleepFor(MonoDelta::FromMicroseconds(sleep_time));
             }
diff --git a/be/src/olap/out_stream.h b/be/src/olap/out_stream.h
index 232db9e..6473091 100644
--- a/be/src/olap/out_stream.h
+++ b/be/src/olap/out_stream.h
@@ -95,12 +95,12 @@ public:
     const std::vector<StorageByteBuffer*>& output_buffers() { return _output_buffers; }
 
     void print_position_debug_info() {
-        VLOG(10) << "compress: " << _spilled_bytes;
+        VLOG_TRACE << "compress: " << _spilled_bytes;
 
         if (_current != NULL) {
-            VLOG(10) << "uncompress=" << (_current->position() - sizeof(StreamHead));
+            VLOG_TRACE << "uncompress=" << (_current->position() - sizeof(StreamHead));
         } else {
-            VLOG(10) << "uncompress 0";
+            VLOG_TRACE << "uncompress 0";
         }
     }
 
diff --git a/be/src/olap/push_handler.cpp b/be/src/olap/push_handler.cpp
index b3d4305..fe54ef4 100644
--- a/be/src/olap/push_handler.cpp
+++ b/be/src/olap/push_handler.cpp
@@ -105,7 +105,7 @@ OLAPStatus PushHandler::_do_streaming_ingestion(TabletSharedPtr tablet, const TP
     // only when fe sends schema_change true, should consider to push related
     // tablet
     if (_request.is_schema_changing) {
-        VLOG(3) << "push req specify schema changing is true. "
+        VLOG_NOTICE << "push req specify schema changing is true. "
                 << "tablet=" << tablet->full_name()
                 << ", transaction_id=" << request.transaction_id;
         AlterTabletTaskSharedPtr alter_task = tablet->alter_task();
@@ -267,10 +267,10 @@ OLAPStatus PushHandler::_convert_v2(TabletSharedPtr cur_tablet, TabletSharedPtr
     load_id.set_lo(0);
 
     do {
-        VLOG(3) << "start to convert delta file.";
+        VLOG_NOTICE << "start to convert delta file.";
 
         // 1. init RowsetBuilder of cur_tablet for current push
-        VLOG(3) << "init rowset builder. tablet=" << cur_tablet->full_name()
+        VLOG_NOTICE << "init rowset builder. tablet=" << cur_tablet->full_name()
                 << ", block_row_size=" << cur_tablet->num_rows_per_row_block();
         RowsetWriterContext context;
         context.rowset_id = StorageEngine::instance()->next_rowset_id();
@@ -337,7 +337,7 @@ OLAPStatus PushHandler::_convert_v2(TabletSharedPtr cur_tablet, TabletSharedPtr
 
             // 4. Read data from broker and write into SegmentGroup of cur_tablet
             // Convert from raw to delta
-            VLOG(3) << "start to convert etl file to delta.";
+            VLOG_NOTICE << "start to convert etl file to delta.";
             while (!reader->eof()) {
                 res = reader->next(&row);
                 if (OLAP_SUCCESS != res) {
@@ -377,7 +377,7 @@ OLAPStatus PushHandler::_convert_v2(TabletSharedPtr cur_tablet, TabletSharedPtr
         _write_rows += (*cur_rowset)->num_rows();
 
         // 5. Convert data for schema change tables
-        VLOG(10) << "load to related tables of schema_change if possible.";
+        VLOG_TRACE << "load to related tables of schema_change if possible.";
         if (new_tablet != nullptr) {
             SchemaChangeHandler schema_change;
             res = schema_change.schema_version_convert(cur_tablet, new_tablet, cur_rowset,
@@ -390,7 +390,7 @@ OLAPStatus PushHandler::_convert_v2(TabletSharedPtr cur_tablet, TabletSharedPtr
         }
     } while (0);
 
-    VLOG(10) << "convert delta file end. res=" << res << ", tablet=" << cur_tablet->full_name()
+    VLOG_TRACE << "convert delta file end. res=" << res << ", tablet=" << cur_tablet->full_name()
              << ", processed_rows" << num_rows;
     return res;
 }
@@ -407,7 +407,7 @@ OLAPStatus PushHandler::_convert(TabletSharedPtr cur_tablet, TabletSharedPtr new
     load_id.set_lo(0);
 
     do {
-        VLOG(3) << "start to convert delta file.";
+        VLOG_NOTICE << "start to convert delta file.";
 
         // 1. Init BinaryReader to read raw file if exist,
         //    in case of empty push and delete data, this will be skipped.
@@ -453,7 +453,7 @@ OLAPStatus PushHandler::_convert(TabletSharedPtr cur_tablet, TabletSharedPtr new
         }
 
         // 2. init RowsetBuilder of cur_tablet for current push
-        VLOG(3) << "init RowsetBuilder.";
+        VLOG_NOTICE << "init RowsetBuilder.";
         RowsetWriterContext context;
         context.rowset_id = StorageEngine::instance()->next_rowset_id();
         context.tablet_uid = cur_tablet->tablet_uid();
@@ -483,7 +483,7 @@ OLAPStatus PushHandler::_convert(TabletSharedPtr cur_tablet, TabletSharedPtr new
         }
 
         // 3. New RowsetBuilder to write data into rowset
-        VLOG(3) << "init rowset builder. tablet=" << cur_tablet->full_name()
+        VLOG_NOTICE << "init rowset builder. tablet=" << cur_tablet->full_name()
                 << ", block_row_size=" << cur_tablet->num_rows_per_row_block();
 
         // 4. Init RowCursor
@@ -495,7 +495,7 @@ OLAPStatus PushHandler::_convert(TabletSharedPtr cur_tablet, TabletSharedPtr new
         // 5. Read data from raw file and write into SegmentGroup of cur_tablet
         if (_request.__isset.http_file_path) {
             // Convert from raw to delta
-            VLOG(3) << "start to convert row file to delta.";
+            VLOG_NOTICE << "start to convert row file to delta.";
             while (!reader->eof()) {
                 res = reader->next(&row);
                 if (OLAP_SUCCESS != res) {
@@ -538,7 +538,7 @@ OLAPStatus PushHandler::_convert(TabletSharedPtr cur_tablet, TabletSharedPtr new
         _write_rows += (*cur_rowset)->num_rows();
 
         // 7. Convert data for schema change tables
-        VLOG(10) << "load to related tables of schema_change if possible.";
+        VLOG_TRACE << "load to related tables of schema_change if possible.";
         if (new_tablet != nullptr) {
             SchemaChangeHandler schema_change;
             res = schema_change.schema_version_convert(cur_tablet, new_tablet, cur_rowset,
@@ -552,7 +552,7 @@ OLAPStatus PushHandler::_convert(TabletSharedPtr cur_tablet, TabletSharedPtr new
     } while (0);
 
     SAFE_DELETE(reader);
-    VLOG(10) << "convert delta file end. res=" << res << ", tablet=" << cur_tablet->full_name()
+    VLOG_TRACE << "convert delta file end. res=" << res << ", tablet=" << cur_tablet->full_name()
              << ", processed_rows" << num_rows;
     return res;
 }
diff --git a/be/src/olap/reader.cpp b/be/src/olap/reader.cpp
index 0b05c74..a40e5a9 100644
--- a/be/src/olap/reader.cpp
+++ b/be/src/olap/reader.cpp
@@ -281,7 +281,7 @@ OLAPStatus Reader::_unique_key_next_row(RowCursor* row_cursor, MemPool* mem_pool
 }
 
 void Reader::close() {
-    VLOG(3) << "merged rows:" << _merged_rows;
+    VLOG_NOTICE << "merged rows:" << _merged_rows;
     _conditions.finalize();
     _delete_handler.finalize();
 
@@ -320,7 +320,7 @@ OLAPStatus Reader::_capture_rs_readers(const ReaderParams& read_params) {
         bool is_lower_key_included = false;
         if (_keys_param.range == "gt") {
             if (end_key != nullptr && compare_row_key(*start_key, *end_key) >= 0) {
-                VLOG(3) << "return EOF when range=" << _keys_param.range
+                VLOG_NOTICE << "return EOF when range=" << _keys_param.range
                         << ", start_key=" << start_key->to_string()
                         << ", end_key=" << end_key->to_string();
                 eof = true;
@@ -329,7 +329,7 @@ OLAPStatus Reader::_capture_rs_readers(const ReaderParams& read_params) {
             is_lower_key_included = false;
         } else if (_keys_param.range == "ge") {
             if (end_key != nullptr && compare_row_key(*start_key, *end_key) > 0) {
-                VLOG(3) << "return EOF when range=" << _keys_param.range
+                VLOG_NOTICE << "return EOF when range=" << _keys_param.range
                         << ", start_key=" << start_key->to_string()
                         << ", end_key=" << end_key->to_string();
                 eof = true;
@@ -477,7 +477,7 @@ OLAPStatus Reader::_init_return_columns(const ReaderParams& read_params) {
                 _value_cids.push_back(i);
             }
         }
-        VLOG(3) << "return column is empty, using full column as default.";
+        VLOG_NOTICE << "return column is empty, using full column as default.";
     } else if (read_params.reader_type == READER_CHECKSUM) {
         _return_columns = read_params.return_columns;
         for (auto id : read_params.return_columns) {
diff --git a/be/src/olap/rowset/alpha_rowset.cpp b/be/src/olap/rowset/alpha_rowset.cpp
index 9ff8178..3e6ab99 100644
--- a/be/src/olap/rowset/alpha_rowset.cpp
+++ b/be/src/olap/rowset/alpha_rowset.cpp
@@ -64,7 +64,7 @@ OLAPStatus AlphaRowset::create_reader(const std::shared_ptr<MemTracker>& parent_
 }
 
 OLAPStatus AlphaRowset::remove() {
-    VLOG(3) << "begin to remove files in rowset " << unique_id() << ", version:" << start_version()
+    VLOG_NOTICE << "begin to remove files in rowset " << unique_id() << ", version:" << start_version()
             << "-" << end_version() << ", tabletid:" << _rowset_meta->tablet_id();
     for (auto segment_group : _segment_groups) {
         bool ret = segment_group->delete_all_files();
@@ -201,7 +201,7 @@ OLAPStatus AlphaRowset::split_range(const RowCursor& start_key, const RowCursor&
     }
 
     step_pos = start_pos;
-    VLOG(3) << "start_pos=" << start_pos.segment << ", " << start_pos.index_offset;
+    VLOG_NOTICE << "start_pos=" << start_pos.segment << ", " << start_pos.index_offset;
 
     //find last row_block is end_key is given, or using last_row_block
     if (largest_segment_group->find_short_key(end_key, &helper_cursor, false, &end_pos) !=
@@ -212,7 +212,7 @@ OLAPStatus AlphaRowset::split_range(const RowCursor& start_key, const RowCursor&
         }
     }
 
-    VLOG(3) << "end_pos=" << end_pos.segment << ", " << end_pos.index_offset;
+    VLOG_NOTICE << "end_pos=" << end_pos.segment << ", " << end_pos.index_offset;
 
     //get rows between first and last
     OLAPStatus res = OLAP_SUCCESS;
@@ -330,7 +330,7 @@ OLAPStatus AlphaRowset::init() {
             // table value column, so when first start the two number is not the same,
             // it causes start failed. When `expect_zone_maps_num > zone_maps_size` it may be the first start after upgrade
             if (expect_zone_maps_num > zone_maps_size) {
-                VLOG(1) << "tablet: " << _rowset_meta->tablet_id() << " expect zone map size is "
+                VLOG_CRITICAL << "tablet: " << _rowset_meta->tablet_id() << " expect zone map size is "
                         << expect_zone_maps_num << ", actual num is " << zone_maps_size
                         << ". If this is not the first start after upgrade, please pay attention!";
             }
diff --git a/be/src/olap/rowset/alpha_rowset_reader.cpp b/be/src/olap/rowset/alpha_rowset_reader.cpp
index 8993861..1abc6da 100644
--- a/be/src/olap/rowset/alpha_rowset_reader.cpp
+++ b/be/src/olap/rowset/alpha_rowset_reader.cpp
@@ -157,7 +157,7 @@ OLAPStatus AlphaRowsetReader::_merge_block(RowBlock** block) {
             return status;
         }
 
-        VLOG(10) << "get merged row: " << row_cursor->to_string();
+        VLOG_TRACE << "get merged row: " << row_cursor->to_string();
 
         _read_block->get_row(_read_block->pos(), _dst_cursor);
         copy_row(_dst_cursor, *row_cursor, _read_block->mem_pool());
@@ -374,7 +374,7 @@ OLAPStatus AlphaRowsetReader::_init_merge_ctxs(RowsetReaderContext* read_context
             // filter
             if (new_column_data->rowset_pruning_filter()) {
                 _stats->rows_stats_filtered += new_column_data->num_rows();
-                VLOG(3) << "filter segment group in query in condition. version="
+                VLOG_NOTICE << "filter segment group in query in condition. version="
                         << new_column_data->version();
                 continue;
             }
@@ -383,14 +383,14 @@ OLAPStatus AlphaRowsetReader::_init_merge_ctxs(RowsetReaderContext* read_context
         int ret = new_column_data->delete_pruning_filter();
         if (ret == DEL_SATISFIED) {
             _stats->rows_del_filtered += new_column_data->num_rows();
-            VLOG(3) << "filter segment group in delete predicate:" << new_column_data->version();
+            VLOG_NOTICE << "filter segment group in delete predicate:" << new_column_data->version();
             continue;
         } else if (ret == DEL_PARTIAL_SATISFIED) {
-            VLOG(3) << "filter segment group partially in delete predicate:"
+            VLOG_NOTICE << "filter segment group partially in delete predicate:"
                     << new_column_data->version();
             new_column_data->set_delete_status(DEL_PARTIAL_SATISFIED);
         } else {
-            VLOG(3) << "not filter segment group in delete predicate:"
+            VLOG_NOTICE << "not filter segment group in delete predicate:"
                     << new_column_data->version();
             new_column_data->set_delete_status(DEL_NOT_SATISFIED);
         }
diff --git a/be/src/olap/rowset/column_data.cpp b/be/src/olap/rowset/column_data.cpp
index cc8cec7..03dc867 100644
--- a/be/src/olap/rowset/column_data.cpp
+++ b/be/src/olap/rowset/column_data.cpp
@@ -159,7 +159,7 @@ OLAPStatus ColumnData::_seek_to_block(const RowBlockPosition& block_pos, bool wi
         end_block = _segment_reader->block_count() - 1;
     }
 
-    VLOG(3) << "seek from " << block_pos.data_offset << " to " << end_block;
+    VLOG_NOTICE << "seek from " << block_pos.data_offset << " to " << end_block;
     return _segment_reader->seek_to_block(block_pos.data_offset, end_block, without_filter,
                                           &_next_block, &_segment_eof);
 }
@@ -244,7 +244,7 @@ OLAPStatus ColumnData::_find_position_by_full_key(const RowCursor& key, bool fin
         } else {
             it_result = std::upper_bound(it_start, it_end, key, comparator);
         }
-        VLOG(3) << "get result iterator. offset=" << *it_result
+        VLOG_NOTICE << "get result iterator. offset=" << *it_result
                 << ", start_pos=" << start_position.to_string();
     } catch (std::exception& e) {
         LOG(WARNING) << "exception happens when doing seek. exception=" << e.what();
diff --git a/be/src/olap/rowset/column_data_writer.cpp b/be/src/olap/rowset/column_data_writer.cpp
index 3f5682d..33452a2 100644
--- a/be/src/olap/rowset/column_data_writer.cpp
+++ b/be/src/olap/rowset/column_data_writer.cpp
@@ -90,7 +90,7 @@ OLAPStatus ColumnDataWriter::init() {
         return res;
     }
 
-    VLOG(3) << "init ColumnData writer. segment_group_id=" << _segment_group->segment_group_id()
+    VLOG_NOTICE << "init ColumnData writer. segment_group_id=" << _segment_group->segment_group_id()
             << ", block_row_number=" << _segment_group->get_num_rows_per_row_block();
     RowBlockInfo block_info(0U, _segment_group->get_num_rows_per_row_block());
     block_info.null_supported = true;
diff --git a/be/src/olap/rowset/column_reader.cpp b/be/src/olap/rowset/column_reader.cpp
index c8d3e81..bd11999 100644
--- a/be/src/olap/rowset/column_reader.cpp
+++ b/be/src/olap/rowset/column_reader.cpp
@@ -679,7 +679,7 @@ OLAPStatus ColumnReader::init(std::map<StreamName, ReadOnlyFileStream*>* streams
         _present_reader = NULL;
         _value_present = false;
     } else {
-        VLOG(10) << "create null present_stream for column_id:" << _column_unique_id;
+        VLOG_TRACE << "create null present_stream for column_id:" << _column_unique_id;
         _present_reader = new (std::nothrow) BitFieldReader(present_stream);
 
         if (NULL == _present_reader) {
diff --git a/be/src/olap/rowset/rowset.h b/be/src/olap/rowset/rowset.h
index d9fc804..06ddbeb 100644
--- a/be/src/olap/rowset/rowset.h
+++ b/be/src/olap/rowset/rowset.h
@@ -190,7 +190,7 @@ public:
             LOG(WARNING) << "state transition failed from:" << _rowset_state_machine.rowset_state();
             return;
         }
-        VLOG(3) << "rowset is close. rowset state from:" << old_state << " to "
+        VLOG_NOTICE << "rowset is close. rowset state from:" << old_state << " to "
                 << _rowset_state_machine.rowset_state() << ", version:" << start_version() << "-"
                 << end_version() << ", tabletid:" << _rowset_meta->tablet_id();
     }
@@ -237,7 +237,7 @@ public:
                 }
             }
             if (_rowset_state_machine.rowset_state() == ROWSET_UNLOADED) {
-                VLOG(3) << "close the rowset. rowset state from ROWSET_UNLOADING to ROWSET_UNLOADED"
+                VLOG_NOTICE << "close the rowset. rowset state from ROWSET_UNLOADING to ROWSET_UNLOADED"
                         << ", version:" << start_version() << "-" << end_version()
                         << ", tabletid:" << _rowset_meta->tablet_id();
             }
diff --git a/be/src/olap/rowset/rowset_meta_manager.cpp b/be/src/olap/rowset/rowset_meta_manager.cpp
index 5fb9f54..85a226f 100644
--- a/be/src/olap/rowset/rowset_meta_manager.cpp
+++ b/be/src/olap/rowset/rowset_meta_manager.cpp
@@ -102,9 +102,9 @@ OLAPStatus RowsetMetaManager::save(OlapMeta* meta, TabletUid tablet_uid, const R
 OLAPStatus RowsetMetaManager::remove(OlapMeta* meta, TabletUid tablet_uid,
                                      const RowsetId& rowset_id) {
     std::string key = ROWSET_PREFIX + tablet_uid.to_string() + "_" + rowset_id.to_string();
-    VLOG(3) << "start to remove rowset, key:" << key;
+    VLOG_NOTICE << "start to remove rowset, key:" << key;
     OLAPStatus status = meta->remove(META_COLUMN_FAMILY_INDEX, key);
-    VLOG(3) << "remove rowset key:" << key << " finished";
+    VLOG_NOTICE << "remove rowset key:" << key << " finished";
     return status;
 }
 
diff --git a/be/src/olap/rowset/run_length_byte_reader.cpp b/be/src/olap/rowset/run_length_byte_reader.cpp
index f3e7f64..61c1ea3 100644
--- a/be/src/olap/rowset/run_length_byte_reader.cpp
+++ b/be/src/olap/rowset/run_length_byte_reader.cpp
@@ -96,7 +96,7 @@ OLAPStatus RunLengthByteReader::seek(PositionProvider* position) {
 
     res = _input->seek(position);
     if (OLAP_SUCCESS != res) {
-        VLOG(10) << "fail to ReadOnlyFileStream seek. res = " << res;
+        VLOG_TRACE << "fail to ReadOnlyFileStream seek. res = " << res;
         return res;
     }
 
diff --git a/be/src/olap/rowset/run_length_integer_writer.cpp b/be/src/olap/rowset/run_length_integer_writer.cpp
index f2876c5..2cd2b7c 100644
--- a/be/src/olap/rowset/run_length_integer_writer.cpp
+++ b/be/src/olap/rowset/run_length_integer_writer.cpp
@@ -727,7 +727,7 @@ void RunLengthIntegerWriter::get_position(PositionEntryWriter* index_entry, bool
 
     if (print) {
         _output->print_position_debug_info();
-        VLOG(10) << "literals=" << _num_literals;
+        VLOG_TRACE << "literals=" << _num_literals;
     }
 }
 
diff --git a/be/src/olap/rowset/run_length_integer_writer.h b/be/src/olap/rowset/run_length_integer_writer.h
index 1f2d842..11095c5 100644
--- a/be/src/olap/rowset/run_length_integer_writer.h
+++ b/be/src/olap/rowset/run_length_integer_writer.h
@@ -114,7 +114,7 @@ public:
 
     void print_position_debug_info() {
         _output->print_position_debug_info();
-        VLOG(10) << "_num_literals=" << _num_literals;
+        VLOG_TRACE << "_num_literals=" << _num_literals;
     }
 
 private:
diff --git a/be/src/olap/rowset/segment_group.cpp b/be/src/olap/rowset/segment_group.cpp
index aaa1528..762c4ca 100644
--- a/be/src/olap/rowset/segment_group.cpp
+++ b/be/src/olap/rowset/segment_group.cpp
@@ -221,7 +221,7 @@ bool SegmentGroup::delete_all_files() {
         string index_path = construct_index_file_path(seg_id);
         string data_path = construct_data_file_path(seg_id);
 
-        VLOG(3) << "delete index file. path=" << index_path;
+        VLOG_NOTICE << "delete index file. path=" << index_path;
         if (remove(index_path.c_str()) != 0) {
             // if the errno is not ENOENT, log the error msg.
             // ENOENT stands for 'No such file or directory'
@@ -233,7 +233,7 @@ bool SegmentGroup::delete_all_files() {
             }
         }
 
-        VLOG(3) << "delete data file. path=" << data_path;
+        VLOG_NOTICE << "delete data file. path=" << data_path;
         if (remove(data_path.c_str()) != 0) {
             if (errno != ENOENT) {
                 char errmsg[64];
@@ -485,7 +485,7 @@ OLAPStatus SegmentGroup::find_short_key(const RowCursor& key, RowCursor* helper_
         }
     }
 
-    VLOG(3) << "seg=" << offset.segment << ", offset=" << offset.offset;
+    VLOG_NOTICE << "seg=" << offset.segment << ", offset=" << offset.offset;
     return _index.get_row_block_position(offset, pos);
 }
 
@@ -703,7 +703,7 @@ OLAPStatus SegmentGroup::finalize_segment(uint32_t data_segment_size, int64_t nu
         return res;
     }
 
-    VLOG(3) << "finalize_segment. file_name=" << _current_file_handler.file_name()
+    VLOG_NOTICE << "finalize_segment. file_name=" << _current_file_handler.file_name()
             << ", file_length=" << file_length;
 
     if ((res = _current_file_handler.close()) != OLAP_SUCCESS) {
@@ -811,7 +811,7 @@ OLAPStatus SegmentGroup::convert_from_old_files(const std::string& snapshot_path
                          << ", to=" << new_data_file_name << ", errno=" << Errno::no();
             return OLAP_ERR_OS_ERROR;
         } else {
-            VLOG(3) << "link data file from " << old_data_file_name << " to " << new_data_file_name
+            VLOG_NOTICE << "link data file from " << old_data_file_name << " to " << new_data_file_name
                     << " successfully";
         }
         success_links->push_back(new_data_file_name);
@@ -830,7 +830,7 @@ OLAPStatus SegmentGroup::convert_from_old_files(const std::string& snapshot_path
                          << ", to=" << new_index_file_name << ", errno=" << Errno::no();
             return OLAP_ERR_OS_ERROR;
         } else {
-            VLOG(3) << "link index file from " << old_index_file_name << " to "
+            VLOG_NOTICE << "link index file from " << old_index_file_name << " to "
                     << new_index_file_name << " successfully";
         }
         success_links->push_back(new_index_file_name);
@@ -855,7 +855,7 @@ OLAPStatus SegmentGroup::convert_to_old_files(const std::string& snapshot_path,
             }
             success_links->push_back(old_data_file_name);
         }
-        VLOG(3) << "create hard link. from=" << new_data_file_name << ", "
+        VLOG_NOTICE << "create hard link. from=" << new_data_file_name << ", "
                 << "to=" << old_data_file_name;
         std::string new_index_file_name =
                 construct_index_file_path(_rowset_path_prefix, segment_id);
@@ -869,7 +869,7 @@ OLAPStatus SegmentGroup::convert_to_old_files(const std::string& snapshot_path,
             }
             success_links->push_back(old_index_file_name);
         }
-        VLOG(3) << "create hard link. from=" << new_index_file_name << ", "
+        VLOG_NOTICE << "create hard link. from=" << new_index_file_name << ", "
                 << "to=" << old_index_file_name;
     }
     return OLAP_SUCCESS;
diff --git a/be/src/olap/rowset/segment_reader.cpp b/be/src/olap/rowset/segment_reader.cpp
index 80f0336..114b704 100644
--- a/be/src/olap/rowset/segment_reader.cpp
+++ b/be/src/olap/rowset/segment_reader.cpp
@@ -129,7 +129,7 @@ OLAPStatus SegmentReader::_load_segment_file() {
         return res;
     }
 
-    //VLOG(3) << "seg file : " << _file_name;
+    //VLOG_NOTICE << "seg file : " << _file_name;
     // In file_header.unserialize(), it validates file length, signature, checksum of protobuf.
     _file_header = _segment_group->get_seg_pb(_segment_id);
     _null_supported = _segment_group->get_null_supported(_segment_id);
@@ -370,14 +370,14 @@ OLAPStatus SegmentReader::_pick_columns() {
 }
 
 OLAPStatus SegmentReader::_pick_delete_row_groups(uint32_t first_block, uint32_t last_block) {
-    VLOG(10) << "pick for " << first_block << " to " << last_block << " for delete_condition";
+    VLOG_TRACE << "pick for " << first_block << " to " << last_block << " for delete_condition";
 
     if (_delete_handler->empty()) {
         return OLAP_SUCCESS;
     }
 
     if (DEL_NOT_SATISFIED == _delete_status) {
-        VLOG(10) << "the segment not satisfy the delete_conditions";
+        VLOG_TRACE << "the segment not satisfy the delete_conditions";
         return OLAP_SUCCESS;
     }
 
@@ -422,11 +422,11 @@ OLAPStatus SegmentReader::_pick_delete_row_groups(uint32_t first_block, uint32_t
                 }
             } else if (true == del_partial_satisfied) {
                 _include_blocks[j] = DEL_PARTIAL_SATISFIED;
-                VLOG(10) << "filter block partially: " << j;
+                VLOG_TRACE << "filter block partially: " << j;
             } else {
                 _include_blocks[j] = DEL_SATISFIED;
                 --_remain_block;
-                VLOG(10) << "filter block: " << j;
+                VLOG_TRACE << "filter block: " << j;
                 if (j < _block_count - 1) {
                     _stats->rows_del_filtered += _num_rows_in_block;
                 } else {
@@ -456,7 +456,7 @@ OLAPStatus SegmentReader::_init_include_blocks(uint32_t first_block, uint32_t la
 }
 
 OLAPStatus SegmentReader::_pick_row_groups(uint32_t first_block, uint32_t last_block) {
-    VLOG(10) << "pick from " << first_block << " to " << last_block;
+    VLOG_TRACE << "pick from " << first_block << " to " << last_block;
 
     if (first_block > last_block) {
         OLAP_LOG_WARNING("invalid block offset. [first_block=%u last_block=%u]", first_block,
@@ -511,7 +511,7 @@ OLAPStatus SegmentReader::_pick_row_groups(uint32_t first_block, uint32_t last_b
     }
 
     if (_remain_block < MIN_FILTER_BLOCK_NUM) {
-        VLOG(10) << "bloom filter is ignored for too few block remained. "
+        VLOG_TRACE << "bloom filter is ignored for too few block remained. "
                  << "remain_block=" << _remain_block
                  << ", const_time=" << timer.get_elapse_time_us();
         return OLAP_SUCCESS;
@@ -548,7 +548,7 @@ OLAPStatus SegmentReader::_pick_row_groups(uint32_t first_block, uint32_t last_b
         }
     }
 
-    VLOG(10) << "pick row groups finished. remain_block=" << _remain_block
+    VLOG_TRACE << "pick row groups finished. remain_block=" << _remain_block
              << ", const_time=" << timer.get_elapse_time_us();
     return OLAP_SUCCESS;
 }
@@ -713,7 +713,7 @@ OLAPStatus SegmentReader::_load_index(bool is_using_cache) {
         }
     }
 
-    VLOG(10) << "found index entry count: " << _block_count;
+    VLOG_TRACE << "found index entry count: " << _block_count;
     return OLAP_SUCCESS;
 }
 
@@ -812,7 +812,7 @@ OLAPStatus SegmentReader::_seek_to_block_directly(int64_t block_id,
         PositionProvider position(&_column_indices[cid]->entry(block_id));
         if (OLAP_SUCCESS != (res = _column_readers[cid]->seek(&position))) {
             if (OLAP_ERR_COLUMN_STREAM_EOF == res) {
-                VLOG(10) << "Stream EOF. tablet_id=" << _segment_group->get_tablet_id()
+                VLOG_TRACE << "Stream EOF. tablet_id=" << _segment_group->get_tablet_id()
                          << ", column_id=" << _column_readers[cid]->column_unique_id()
                          << ", block_id=" << block_id;
                 return OLAP_ERR_DATA_EOF;
@@ -832,7 +832,7 @@ OLAPStatus SegmentReader::_seek_to_block_directly(int64_t block_id,
 }
 
 OLAPStatus SegmentReader::_reset_readers() {
-    VLOG(10) << _streams.size() << " stream in total.";
+    VLOG_TRACE << _streams.size() << " stream in total.";
 
     for (std::map<StreamName, ReadOnlyFileStream*>::iterator it = _streams.begin();
          it != _streams.end(); ++it) {
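
Many of the VLOG_TRACE lines in segment_reader.cpp above sit in hot read paths. Under glog the operands after << are not evaluated when the level is off, so the renamed statements stay cheap as-is; an explicit VLOG_IS_ON guard only matters when message preparation spans multiple statements. A hedged sketch of that pattern, where describe_blocks() is a hypothetical stand-in for expensive message construction rather than a function from this codebase:

    #include <glog/logging.h>

    #include <string>

    // Hypothetical helper: pretend this stringification is expensive.
    static std::string describe_blocks(int first, int last) {
        return "pick from " + std::to_string(first) + " to " + std::to_string(last);
    }

    void trace_pick(int first_block, int last_block) {
        // VLOG_IS_ON(10) is the same gate VLOG_TRACE uses under this
        // commit's mapping, so the preparation below runs only when
        // trace verbosity is actually enabled.
        if (VLOG_IS_ON(10)) {
            std::string msg = describe_blocks(first_block, last_block);
            VLOG(10) << msg;
        }
    }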
diff --git a/be/src/olap/rowset/segment_writer.cpp b/be/src/olap/rowset/segment_writer.cpp
index b0f3335..a0213a3 100644
--- a/be/src/olap/rowset/segment_writer.cpp
+++ b/be/src/olap/rowset/segment_writer.cpp
@@ -190,7 +190,7 @@ OLAPStatus SegmentWriter::_make_file_header(ColumnDataHeaderMessage* file_header
             data_length += stream->get_stream_length();
         }
 
-        VLOG(10) << "stream id=" << it->first.unique_column_id() << ", type=" << it->first.kind()
+        VLOG_TRACE << "stream id=" << it->first.unique_column_id() << ", type=" << it->first.kind()
                  << ", length=" << stream->get_stream_length();
     }
 
@@ -259,7 +259,7 @@ OLAPStatus SegmentWriter::finalize(uint32_t* segment_file_size) {
         // write out the streams that were not suppressed
         if (!stream->is_suppressed()) {
             checksum = stream->crc32(checksum);
-            VLOG(10) << "stream id=" << it->first.unique_column_id()
+            VLOG_TRACE << "stream id=" << it->first.unique_column_id()
                      << ", type=" << it->first.kind();
             res = stream->write_to_file(&file_handle, _write_mbytes_per_sec);
             if (OLAP_SUCCESS != res) {
diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp
index 028b708..ad8b8d3 100644
--- a/be/src/olap/schema_change.cpp
+++ b/be/src/olap/schema_change.cpp
@@ -485,7 +485,7 @@ OLAPStatus RowBlockChanger::change_row_block(const RowBlock* ref_block, int32_t
                                  << _schema_mapping[i].materialized_function;
                     return OLAP_ERR_SCHEMA_CHANGE_INFO_INVALID;
                 }
-                VLOG(3) << "_schema_mapping[" << i
+                VLOG_NOTICE << "_schema_mapping[" << i
                         << "].materialized_function : " << _schema_mapping[i].materialized_function;
                 for (size_t row_index = 0, new_row_index = 0;
                      row_index < ref_block->row_block_info().row_num; ++row_index) {
@@ -601,7 +601,7 @@ OLAPStatus RowBlockChanger::change_row_block(const RowBlock* ref_block, int32_t
                 }
 
                 if (newtype < reftype) {
-                    VLOG(3) << "type degraded while altering column. "
+                    VLOG_NOTICE << "type degraded while altering column. "
                             << "column=" << mutable_block->tablet_schema().column(i).name()
                             << ", origin_type="
                             << ref_block->tablet_schema().column(ref_column).type()
@@ -729,7 +729,7 @@ RowBlockAllocator::RowBlockAllocator(const TabletSchema& tablet_schema, size_t m
           _memory_allocated(0),
           _row_len(tablet_schema.row_size()),
           _memory_limitation(memory_limitation) {
-    VLOG(3) << "RowBlockAllocator(). row_len=" << _row_len;
+    VLOG_NOTICE << "RowBlockAllocator(). row_len=" << _row_len;
 }
 
 RowBlockAllocator::~RowBlockAllocator() {
@@ -742,7 +742,7 @@ OLAPStatus RowBlockAllocator::allocate(RowBlock** row_block, size_t num_rows, bo
     size_t row_block_size = _row_len * num_rows;
 
     if (_memory_limitation > 0 && _memory_allocated + row_block_size > _memory_limitation) {
-        VLOG(3) << "RowBlockAllocator::alocate() memory exceeded. "
+        VLOG_NOTICE << "RowBlockAllocator::alocate() memory exceeded. "
                 << "m_memory_allocated=" << _memory_allocated;
         *row_block = nullptr;
         return OLAP_SUCCESS;
@@ -761,7 +761,7 @@ OLAPStatus RowBlockAllocator::allocate(RowBlock** row_block, size_t num_rows, bo
     (*row_block)->init(row_block_info);
 
     _memory_allocated += row_block_size;
-    VLOG(3) << "RowBlockAllocator::allocate() this=" << this << ", num_rows=" << num_rows
+    VLOG_NOTICE << "RowBlockAllocator::allocate() this=" << this << ", num_rows=" << num_rows
             << ", m_memory_allocated=" << _memory_allocated << ", row_block_addr=" << *row_block;
     return OLAP_SUCCESS;
 }
@@ -774,7 +774,7 @@ void RowBlockAllocator::release(RowBlock* row_block) {
 
     _memory_allocated -= row_block->capacity() * _row_len;
 
-    VLOG(3) << "RowBlockAllocator::release() this=" << this
+    VLOG_NOTICE << "RowBlockAllocator::release() this=" << this
             << ", num_rows=" << row_block->capacity()
             << ", m_memory_allocated=" << _memory_allocated << ", row_block_addr=" << row_block;
     delete row_block;
@@ -913,7 +913,7 @@ SchemaChangeDirectly::SchemaChangeDirectly(const RowBlockChanger& row_block_chan
         : _row_block_changer(row_block_changer), _row_block_allocator(nullptr), _cursor(nullptr) {}
 
 SchemaChangeDirectly::~SchemaChangeDirectly() {
-    VLOG(3) << "~SchemaChangeDirectly()";
+    VLOG_NOTICE << "~SchemaChangeDirectly()";
     SAFE_DELETE(_row_block_allocator);
     SAFE_DELETE(_cursor);
 }
@@ -977,7 +977,7 @@ OLAPStatus SchemaChangeDirectly::process(RowsetReaderSharedPtr rowset_reader,
         return OLAP_SUCCESS;
     }
 
-    VLOG(3) << "init writer. new_tablet=" << new_tablet->full_name()
+    VLOG_NOTICE << "init writer. new_tablet=" << new_tablet->full_name()
             << ", block_row_number=" << new_tablet->num_rows_per_row_block();
     RowBlock* new_row_block = nullptr;
 
@@ -1077,7 +1077,7 @@ SchemaChangeWithSorting::SchemaChangeWithSorting(const RowBlockChanger& row_bloc
 }
 
 SchemaChangeWithSorting::~SchemaChangeWithSorting() {
-    VLOG(3) << "~SchemaChangeWithSorting()";
+    VLOG_NOTICE << "~SchemaChangeWithSorting()";
     SAFE_DELETE(_row_block_allocator);
 }
 
@@ -1313,7 +1313,7 @@ bool SchemaChangeWithSorting::_internal_sorting(const std::vector<RowBlock*>& ro
     context.version = version;
     context.version_hash = version_hash;
     context.segments_overlap = segments_overlap;
-    VLOG(3) << "init rowset builder. tablet=" << new_tablet->full_name()
+    VLOG_NOTICE << "init rowset builder. tablet=" << new_tablet->full_name()
             << ", block_row_size=" << new_tablet->num_rows_per_row_block();
 
     std::unique_ptr<RowsetWriter> rowset_writer;
@@ -1868,7 +1868,7 @@ OLAPStatus SchemaChangeHandler::_convert_historical_rowsets(const SchemaChangePa
 
     // c. convert historical data
     for (auto& rs_reader : sc_params.ref_rowset_readers) {
-        VLOG(10) << "begin to convert a history rowset. version=" << rs_reader->version().first
+        VLOG_TRACE << "begin to convert a history rowset. version=" << rs_reader->version().first
                  << "-" << rs_reader->version().second;
 
         // set status for monitor
@@ -1939,13 +1939,13 @@ OLAPStatus SchemaChangeHandler::_convert_historical_rowsets(const SchemaChangePa
             sc_params.new_tablet->release_push_lock();
             goto PROCESS_ALTER_EXIT;
         } else {
-            VLOG(3) << "register new version. tablet=" << sc_params.new_tablet->full_name()
+            VLOG_NOTICE << "register new version. tablet=" << sc_params.new_tablet->full_name()
                     << ", version=" << rs_reader->version().first << "-"
                     << rs_reader->version().second;
         }
         sc_params.new_tablet->release_push_lock();
 
-        VLOG(10) << "succeed to convert a history version."
+        VLOG_TRACE << "succeed to convert a history version."
                  << " version=" << rs_reader->version().first << "-" << rs_reader->version().second;
     }
     // XXX: do not clear the SchemaChange state here, because new deltas still need to be converted into both the old and the new schema versions
@@ -1993,7 +1993,7 @@ OLAPStatus SchemaChangeHandler::_parse_request(
             }
 
             column_mapping->ref_column = column_index;
-            VLOG(3) << "A column refered to existed column will be added after schema changing."
+            VLOG_NOTICE << "A column refered to existed column will be added after schema changing."
                     << "column=" << column_name << ", ref_column=" << column_index;
             continue;
         }
@@ -2035,7 +2035,7 @@ OLAPStatus SchemaChangeHandler::_parse_request(
                 return res;
             }
 
-            VLOG(10) << "A column with default value will be added after schema changing. "
+            VLOG_TRACE << "A column with default value will be added after schema changing. "
                      << "column=" << column_name
                      << ", default_value=" << new_column.default_value();
             continue;
@@ -2048,7 +2048,7 @@ OLAPStatus SchemaChangeHandler::_parse_request(
             return res;
         }
 
-        VLOG(3) << "A new schema delta is converted while dropping column. "
+        VLOG_NOTICE << "A new schema delta is converted while dropping column. "
                 << "Dropped column will be assigned as '0' for the older schema. "
                 << "column=" << column_name;
     }
diff --git a/be/src/olap/snapshot_manager.cpp b/be/src/olap/snapshot_manager.cpp
index 3d413e0..71c892c 100644
--- a/be/src/olap/snapshot_manager.cpp
+++ b/be/src/olap/snapshot_manager.cpp
@@ -332,7 +332,7 @@ OLAPStatus SnapshotManager::_create_snapshot_files(const TabletSharedPtr& ref_ta
     string schema_full_path = get_schema_hash_full_path(ref_tablet, snapshot_id_path);
     string header_path = _get_header_full_path(ref_tablet, schema_full_path);
     if (FileUtils::check_exist(schema_full_path)) {
-        VLOG(10) << "remove the old schema_full_path.";
+        VLOG_TRACE << "remove the old schema_full_path.";
         FileUtils::remove_all(schema_full_path);
     }
 
@@ -413,7 +413,7 @@ OLAPStatus SnapshotManager::_create_snapshot_files(const TabletSharedPtr& ref_ta
                 break;
             }
             rs_metas.push_back(rs->rowset_meta());
-            VLOG(3) << "add rowset meta to clone list. "
+            VLOG_NOTICE << "add rowset meta to clone list. "
                     << " start version " << rs->rowset_meta()->start_version() << " end version "
                     << rs->rowset_meta()->end_version() << " empty " << rs->rowset_meta()->empty();
         }
@@ -496,7 +496,7 @@ OLAPStatus SnapshotManager::_create_snapshot_files(const TabletSharedPtr& ref_ta
                      << snapshot_id_path.c_str();
 
         if (FileUtils::check_exist(snapshot_id_path)) {
-            VLOG(3) << "remove snapshot path. [path=" << snapshot_id_path << "]";
+            VLOG_NOTICE << "remove snapshot path. [path=" << snapshot_id_path << "]";
             FileUtils::remove_all(snapshot_id_path);
         }
     } else {
diff --git a/be/src/olap/storage_engine.cpp b/be/src/olap/storage_engine.cpp
index 3b952c3..21d86ea 100644
--- a/be/src/olap/storage_engine.cpp
+++ b/be/src/olap/storage_engine.cpp
@@ -579,9 +579,9 @@ void StorageEngine::clear_transaction_task(const TTransactionId transaction_id,
 }
 
 void StorageEngine::_start_clean_fd_cache() {
-    VLOG(10) << "start clean file descritpor cache";
+    VLOG_TRACE << "start clean file descritpor cache";
     _file_cache->prune();
-    VLOG(10) << "end clean file descritpor cache";
+    VLOG_TRACE << "end clean file descritpor cache";
 }
 
 OLAPStatus StorageEngine::_start_trash_sweep(double* usage) {
@@ -733,7 +733,7 @@ OLAPStatus StorageEngine::_do_sweep(const string& scan_root, const time_t& local
             if (pos != string::npos) {
                 actual_expire = std::stoi(dir_name.substr(pos + 1));
             }
-            VLOG(10) << "get actual expire time " << actual_expire << " of dir: " << dir_name;
+            VLOG_TRACE << "get actual expire time " << actual_expire << " of dir: " << dir_name;
 
             if (difftime(local_now, mktime(&local_tm_create)) >= actual_expire) {
                 Status ret = FileUtils::remove_all(path_name);
@@ -770,11 +770,11 @@ void StorageEngine::start_delete_unused_rowset() {
         if (it->second.use_count() != 1) {
             ++it;
         } else if (it->second->need_delete_file()) {
-            VLOG(3) << "start to remove rowset:" << it->second->rowset_id()
+            VLOG_NOTICE << "start to remove rowset:" << it->second->rowset_id()
                     << ", version:" << it->second->version().first << "-"
                     << it->second->version().second;
             OLAPStatus status = it->second->remove();
-            VLOG(3) << "remove rowset:" << it->second->rowset_id()
+            VLOG_NOTICE << "remove rowset:" << it->second->rowset_id()
                     << " finished. status:" << status;
             it = _unused_rowsets.erase(it);
         }
@@ -786,7 +786,7 @@ void StorageEngine::add_unused_rowset(RowsetSharedPtr rowset) {
         return;
     }
 
-    VLOG(3) << "add unused rowset, rowset id:" << rowset->rowset_id()
+    VLOG_NOTICE << "add unused rowset, rowset id:" << rowset->rowset_id()
             << ", version:" << rowset->version().first << "-" << rowset->version().second
             << ", unique id:" << rowset->unique_id();
 
diff --git a/be/src/olap/stream_index_writer.cpp b/be/src/olap/stream_index_writer.cpp
index 9f56e6a..9de3f15 100644
--- a/be/src/olap/stream_index_writer.cpp
+++ b/be/src/olap/stream_index_writer.cpp
@@ -161,7 +161,7 @@ OLAPStatus StreamIndexWriter::write_to_buffer(char* buffer, size_t buffer_size)
     }
 
     _header.block_count = _index_to_write.size();
-    VLOG(10) << "header info. pos: " << _header.position_format
+    VLOG_TRACE << "header info. pos: " << _header.position_format
              << ", stat:" << _header.statistic_format << ", entry_size:" << entry_size;
     memcpy(buffer, reinterpret_cast<char*>(&_header), sizeof(_header));
     // set offset, write data
diff --git a/be/src/olap/tablet.cpp b/be/src/olap/tablet.cpp
index 1b5320b..eff6a10 100644
--- a/be/src/olap/tablet.cpp
+++ b/be/src/olap/tablet.cpp
@@ -83,7 +83,7 @@ Tablet::Tablet(TabletMetaSharedPtr tablet_meta, DataDir* data_dir,
 
 OLAPStatus Tablet::_init_once_action() {
     OLAPStatus res = OLAP_SUCCESS;
-    VLOG(3) << "begin to load tablet. tablet=" << full_name()
+    VLOG_NOTICE << "begin to load tablet. tablet=" << full_name()
             << ", version_size=" << _tablet_meta->version_count();
 
     // init cumulative compaction policy by type
@@ -176,7 +176,7 @@ OLAPStatus Tablet::revise_tablet_meta(const std::vector<RowsetMetaSharedPtr>& ro
         for (auto& rs_meta : rowsets_to_clone) {
             new_tablet_meta->add_rs_meta(rs_meta);
         }
-        VLOG(3) << "load rowsets successfully when clone. tablet=" << full_name()
+        VLOG_NOTICE << "load rowsets successfully when clone. tablet=" << full_name()
                 << ", added rowset size=" << rowsets_to_clone.size();
         // save and reload tablet_meta
         res = new_tablet_meta->save_meta(_data_dir);
@@ -296,7 +296,7 @@ void Tablet::modify_rowsets(const std::vector<RowsetSharedPtr>& to_add,
 const RowsetSharedPtr Tablet::get_rowset_by_version(const Version& version) const {
     auto iter = _rs_version_map.find(version);
     if (iter == _rs_version_map.end()) {
-        VLOG(3) << "no rowset for version:" << version << ", tablet: " << full_name();
+        VLOG_NOTICE << "no rowset for version:" << version << ", tablet: " << full_name();
         return nullptr;
     }
     return iter->second;
@@ -305,7 +305,7 @@ const RowsetSharedPtr Tablet::get_rowset_by_version(const Version& version) cons
 const RowsetSharedPtr Tablet::get_stale_rowset_by_version(const Version& version) const {
     auto iter = _stale_rs_version_map.find(version);
     if (iter == _stale_rs_version_map.end()) {
-        VLOG(3) << "no rowset for version:" << version << ", tablet: " << full_name();
+        VLOG_NOTICE << "no rowset for version:" << version << ", tablet: " << full_name();
         return nullptr;
     }
     return iter->second;
@@ -317,7 +317,7 @@ const RowsetSharedPtr Tablet::get_stale_rowset_by_version(const Version& version
 const RowsetSharedPtr Tablet::get_inc_rowset_by_version(const Version& version) const {
     auto iter = _inc_rs_version_map.find(version);
     if (iter == _inc_rs_version_map.end()) {
-        VLOG(3) << "no rowset for version:" << version << ", tablet: " << full_name();
+        VLOG_NOTICE << "no rowset for version:" << version << ", tablet: " << full_name();
         return nullptr;
     }
     return iter->second;
@@ -383,7 +383,7 @@ void Tablet::_delete_inc_rowset_by_version(const Version& version,
         return;
     }
     _tablet_meta->delete_inc_rs_meta_by_version(version);
-    VLOG(3) << "delete incremental rowset. tablet=" << full_name() << ", version=" << version;
+    VLOG_NOTICE << "delete incremental rowset. tablet=" << full_name() << ", version=" << version;
 }
 
 void Tablet::_delete_stale_rowset_by_version(const Version& version) {
@@ -392,7 +392,7 @@ void Tablet::_delete_stale_rowset_by_version(const Version& version) {
         return;
     }
     _tablet_meta->delete_stale_rs_meta_by_version(version);
-    VLOG(3) << "delete stale rowset. tablet=" << full_name() << ", version=" << version;
+    VLOG_NOTICE << "delete stale rowset. tablet=" << full_name() << ", version=" << version;
 }
 
 void Tablet::delete_expired_inc_rowsets() {
@@ -404,7 +404,7 @@ void Tablet::delete_expired_inc_rowsets() {
         if (diff >= config::inc_rowset_expired_sec) {
             Version version(rs_meta->version());
             expired_versions.push_back(std::make_pair(version, rs_meta->version_hash()));
-            VLOG(3) << "find expire incremental rowset. tablet=" << full_name()
+            VLOG_NOTICE << "find expire incremental rowset. tablet=" << full_name()
                     << ", version=" << version << ", version_hash=" << rs_meta->version_hash()
                     << ", exist_sec=" << diff;
         }
@@ -416,7 +416,7 @@ void Tablet::delete_expired_inc_rowsets() {
 
     for (auto& pair : expired_versions) {
         _delete_inc_rowset_by_version(pair.first, pair.second);
-        VLOG(3) << "delete expire incremental data. tablet=" << full_name()
+        VLOG_NOTICE << "delete expire incremental data. tablet=" << full_name()
                 << ", version=" << pair.first;
     }
 
@@ -667,7 +667,7 @@ OLAPStatus Tablet::capture_rs_readers(const std::vector<Version>& version_path,
     for (auto version : version_path) {
         auto it = _rs_version_map.find(version);
         if (it == _rs_version_map.end()) {
-            VLOG(3) << "fail to find Rowset in rs_version for version. tablet=" << full_name()
+            VLOG_NOTICE << "fail to find Rowset in rs_version for version. tablet=" << full_name()
                     << ", version='" << version.first << "-" << version.second;
 
             it = _stale_rs_version_map.find(version);
@@ -938,7 +938,7 @@ OLAPStatus Tablet::split_range(const OlapTuple& start_key_strings, const OlapTup
 
     // if no suitable rowset is found, just return startkey and endkey directly
     if (rowset == nullptr) {
-        VLOG(3) << "there is no base file now, may be tablet is empty.";
+        VLOG_NOTICE << "there is no base file now, may be tablet is empty.";
         // it may be right if the tablet is empty, so we return success.
         ranges->emplace_back(start_key.to_tuple());
         ranges->emplace_back(end_key.to_tuple());
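
For completeness, the renamed macros are still tuned at runtime the same way the numeric VLOG levels were: under glog, a VLOG(n) statement fires only when the process verbosity is at least n, set with the --v flag, the GLOG_v environment variable, or per-file with --vmodule. A self-contained sketch of that behavior, assuming stock glog and none of the Doris BE wiring:

    #include <glog/logging.h>

    int main(int argc, char** argv) {
        FLAGS_logtostderr = true; // print to the console for this demo
        FLAGS_v = 3;              // equivalent to --v=3 or GLOG_v=3
        google::InitGoogleLogging(argv[0]);
        VLOG(3) << "emitted: 3 <= verbosity (the VLOG_NOTICE level)";
        VLOG(10) << "suppressed: 10 > verbosity (the VLOG_TRACE level)";
        return 0;
    }

So a developer chasing the tablet-split messages above would run the backend with -v=3 to surface the NOTICE-level lines, or -v=10 to surface the TRACE-level ones as well.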
diff --git a/be/src/olap/tablet_manager.cpp b/be/src/olap/tablet_manager.cpp
index aa95512..f6024fd 100644
--- a/be/src/olap/tablet_manager.cpp
+++ b/be/src/olap/tablet_manager.cpp
@@ -82,7 +82,7 @@ OLAPStatus TabletManager::_add_tablet_unlocked(TTabletId tablet_id, SchemaHash s
                                                const TabletSharedPtr& tablet, bool update_meta,
                                                bool force) {
     OLAPStatus res = OLAP_SUCCESS;
-    VLOG(3) << "begin to add tablet to TabletManager. "
+    VLOG_NOTICE << "begin to add tablet to TabletManager. "
             << "tablet_id=" << tablet_id << ", schema_hash=" << schema_hash << ", force=" << force;
 
     TabletSharedPtr existed_tablet = nullptr;
@@ -186,7 +186,7 @@ OLAPStatus TabletManager::_add_tablet_to_map_unlocked(TTabletId tablet_id, Schem
     tablet_map[tablet_id].table_arr.sort(_cmp_tablet_by_create_time);
     _add_tablet_to_partition(*tablet);
 
-    VLOG(3) << "add tablet to map successfully."
+    VLOG_NOTICE << "add tablet to map successfully."
             << " tablet_id=" << tablet_id << ", schema_hash=" << schema_hash;
     return res;
 }
@@ -553,7 +553,7 @@ OLAPStatus TabletManager::drop_tablets_on_error_root_path(
                 continue;
             }
             TSchemaHash schema_hash = tablet_info.schema_hash;
-            VLOG(3) << "drop_tablet begin. tablet_id=" << tablet_id
+            VLOG_NOTICE << "drop_tablet begin. tablet_id=" << tablet_id
                     << ", schema_hash=" << schema_hash;
             TabletSharedPtr dropped_tablet = _get_tablet_unlocked(tablet_id, schema_hash);
             if (dropped_tablet == nullptr) {
@@ -668,7 +668,7 @@ void TabletManager::get_tablet_stat(TTabletStatResult* result) {
         int interval_sec = config::tablet_stat_cache_update_interval_second;
         std::lock_guard<std::mutex> l(_tablet_stat_mutex);
         if (curr_ms - _last_update_stat_ms > interval_sec * 1000) {
-            VLOG(3) << "update tablet stat.";
+            VLOG_NOTICE << "update tablet stat.";
             _build_tablet_stat();
             _last_update_stat_ms = UnixMillis();
         }
@@ -726,7 +726,7 @@ TabletSharedPtr TabletManager::find_best_tablet_to_compaction(
                 }
                 if (now_ms - last_failure_ms <=
                     config::min_compaction_failure_interval_sec * 1000) {
-                    VLOG(1) << "Too often to check compaction, skip it. "
+                    VLOG_CRITICAL << "Too often to check compaction, skip it. "
                             << "compaction_type=" << compaction_type_str
                             << ", last_failure_time_ms=" << last_failure_ms
                             << ", tablet_id=" << tablet_ptr->tablet_id();
@@ -768,7 +768,7 @@ TabletSharedPtr TabletManager::find_best_tablet_to_compaction(
     }
 
     if (best_tablet != nullptr) {
-        VLOG(1) << "Found the best tablet for compaction. "
+        VLOG_CRITICAL << "Found the best tablet for compaction. "
                 << "compaction_type=" << compaction_type_str
                 << ", tablet_id=" << best_tablet->tablet_id() << ", path=" << data_dir->path()
                 << ", compaction_score=" << compaction_score
@@ -903,7 +903,7 @@ OLAPStatus TabletManager::load_tablet_from_dir(DataDir* store, TTabletId tablet_
 }
 
 void TabletManager::release_schema_change_lock(TTabletId tablet_id) {
-    VLOG(3) << "release_schema_change_lock begin. tablet_id=" << tablet_id;
+    VLOG_NOTICE << "release_schema_change_lock begin. tablet_id=" << tablet_id;
     ReadLock rlock(_get_tablets_shard_lock(tablet_id));
 
     tablet_map_t& tablet_map = _get_tablet_map(tablet_id);
@@ -913,7 +913,7 @@ void TabletManager::release_schema_change_lock(TTabletId tablet_id) {
     } else {
         it->second.schema_change_lock.unlock();
     }
-    VLOG(3) << "release_schema_change_lock end. tablet_id=" << tablet_id;
+    VLOG_NOTICE << "release_schema_change_lock end. tablet_id=" << tablet_id;
 }
 
 OLAPStatus TabletManager::report_tablet_info(TTabletInfo* tablet_info) {
@@ -933,7 +933,7 @@ OLAPStatus TabletManager::report_tablet_info(TTabletInfo* tablet_info) {
     }
 
     tablet->build_tablet_report_info(tablet_info);
-    VLOG(10) << "success to process report tablet info.";
+    VLOG_TRACE << "success to process report tablet info.";
     return res;
 }
 
@@ -1157,7 +1157,7 @@ void TabletManager::try_delete_unused_tablet_path(DataDir* data_dir, TTabletId t
 
 bool TabletManager::try_schema_change_lock(TTabletId tablet_id) {
     bool res = false;
-    VLOG(3) << "try_schema_change_lock begin. tablet_id=" << tablet_id;
+    VLOG_NOTICE << "try_schema_change_lock begin. tablet_id=" << tablet_id;
     ReadLock rlock(_get_tablets_shard_lock(tablet_id));
     tablet_map_t& tablet_map = _get_tablet_map(tablet_id);
     tablet_map_t::iterator it = tablet_map.find(tablet_id);
@@ -1166,7 +1166,7 @@ bool TabletManager::try_schema_change_lock(TTabletId tablet_id) {
     } else {
         res = (it->second.schema_change_lock.trylock() == OLAP_SUCCESS);
     }
-    VLOG(3) << "try_schema_change_lock end. tablet_id=" << tablet_id;
+    VLOG_NOTICE << "try_schema_change_lock end. tablet_id=" << tablet_id;
     return res;
 }
 
@@ -1245,7 +1245,7 @@ void TabletManager::_build_tablet_stat() {
                 }
                 stat.__set_data_size(tablet->tablet_footprint());
                 stat.__set_row_num(tablet->num_rows());
-                VLOG(3) << "building tablet stat. tablet_id=" << item.first
+                VLOG_NOTICE << "building tablet stat. tablet_id=" << item.first
                         << ", data_size=" << tablet->tablet_footprint()
                         << ", row_num=" << tablet->num_rows();
                 break;
@@ -1264,7 +1264,7 @@ OLAPStatus TabletManager::_create_initial_rowset_unlocked(const TCreateTabletReq
         return OLAP_ERR_CE_CMD_PARAMS_ERROR;
     } else {
         Version version(0, request.version);
-        VLOG(3) << "begin to create init version. version=" << version;
+        VLOG_NOTICE << "begin to create init version. version=" << version;
         RowsetSharedPtr new_rowset;
         do {
             RowsetWriterContext context;
@@ -1420,21 +1420,21 @@ OLAPStatus TabletManager::_drop_tablet_directly_unlocked(TTabletId tablet_id,
 }
 
 TabletSharedPtr TabletManager::_get_tablet_unlocked(TTabletId tablet_id, SchemaHash schema_hash) {
-    VLOG(3) << "begin to get tablet. tablet_id=" << tablet_id << ", schema_hash=" << schema_hash;
+    VLOG_NOTICE << "begin to get tablet. tablet_id=" << tablet_id << ", schema_hash=" << schema_hash;
     tablet_map_t& tablet_map = _get_tablet_map(tablet_id);
     tablet_map_t::iterator it = tablet_map.find(tablet_id);
     if (it != tablet_map.end()) {
         for (TabletSharedPtr tablet : it->second.table_arr) {
             CHECK(tablet != nullptr) << "tablet is nullptr. tablet_id=" << tablet_id;
             if (tablet->equal(tablet_id, schema_hash)) {
-                VLOG(3) << "get tablet success. tablet_id=" << tablet_id
+                VLOG_NOTICE << "get tablet success. tablet_id=" << tablet_id
                         << ", schema_hash=" << schema_hash;
                 return tablet;
             }
         }
     }
 
-    VLOG(3) << "fail to get tablet. tablet_id=" << tablet_id << ", schema_hash=" << schema_hash;
+    VLOG_NOTICE << "fail to get tablet. tablet_id=" << tablet_id << ", schema_hash=" << schema_hash;
     // Return nullptr tablet if fail
     TabletSharedPtr tablet;
     return tablet;
diff --git a/be/src/olap/tablet_meta_manager.cpp b/be/src/olap/tablet_meta_manager.cpp
index 95d4331..e6de35e 100644
--- a/be/src/olap/tablet_meta_manager.cpp
+++ b/be/src/olap/tablet_meta_manager.cpp
@@ -101,7 +101,7 @@ OLAPStatus TabletMetaManager::save(DataDir* store, TTabletId tablet_id, TSchemaH
     std::stringstream key_stream;
     key_stream << header_prefix << tablet_id << "_" << schema_hash;
     std::string key = key_stream.str();
-    VLOG(3) << "save tablet meta to meta store: key = " << key;
+    VLOG_NOTICE << "save tablet meta to meta store: key = " << key;
     OlapMeta* meta = store->get_meta();
 
     TabletMetaPB de_tablet_meta_pb;
diff --git a/be/src/olap/task/engine_batch_load_task.cpp b/be/src/olap/task/engine_batch_load_task.cpp
index 09965fe..dc71791 100644
--- a/be/src/olap/task/engine_batch_load_task.cpp
+++ b/be/src/olap/task/engine_batch_load_task.cpp
@@ -101,7 +101,7 @@ AgentStatus EngineBatchLoadTask::_init() {
     AgentStatus status = DORIS_SUCCESS;
 
     if (_is_init) {
-        VLOG(3) << "has been inited";
+        VLOG_NOTICE << "has been initialized";
         return status;
     }
 
@@ -201,7 +201,7 @@ AgentStatus EngineBatchLoadTask::_process() {
             time_t now = time(NULL);
             if (_push_req.timeout > 0 && _push_req.timeout < now) {
                 // return status to break this callback
-                VLOG(3) << "check time out. time_out:" << _push_req.timeout << ", now:" << now;
+                VLOG_NOTICE << "check time out. time_out:" << _push_req.timeout << ", now:" << now;
                 is_timeout = true;
                 return Status::OK();
             }
diff --git a/be/src/olap/task/engine_checksum_task.cpp b/be/src/olap/task/engine_checksum_task.cpp
index 9e67fc3..b27550e 100644
--- a/be/src/olap/task/engine_checksum_task.cpp
+++ b/be/src/olap/task/engine_checksum_task.cpp
@@ -105,7 +105,7 @@ OLAPStatus EngineChecksumTask::_compute_checksum() {
         OLAPStatus res =
                 reader.next_row_with_aggregation(&row, mem_pool.get(), agg_object_pool.get(), &eof);
         if (res == OLAP_SUCCESS && eof) {
-            VLOG(3) << "reader reads to the end.";
+            VLOG_NOTICE << "reader reads to the end.";
             break;
         } else if (res != OLAP_SUCCESS) {
             OLAP_LOG_WARNING("fail to read in reader. [res=%d]", res);
diff --git a/be/src/olap/task/engine_clone_task.cpp b/be/src/olap/task/engine_clone_task.cpp
index c27db9b..4d346a5 100644
--- a/be/src/olap/task/engine_clone_task.cpp
+++ b/be/src/olap/task/engine_clone_task.cpp
@@ -659,7 +659,7 @@ OLAPStatus EngineCloneTask::_finish_clone(Tablet* tablet, const string& clone_di
         // link files from clone dir, if file exists, skip it
         for (const string& clone_file : clone_files) {
             if (local_files.find(clone_file) != local_files.end()) {
-                VLOG(3) << "find same file when clone, skip it. "
+                VLOG_NOTICE << "find same file when clone, skip it. "
                         << "tablet=" << tablet->full_name() << ", clone_file=" << clone_file;
                 continue;
             }
@@ -723,7 +723,7 @@ OLAPStatus EngineCloneTask::_clone_incremental_data(Tablet* tablet,
     std::vector<Version> versions_to_delete;
     std::vector<RowsetMetaSharedPtr> rowsets_to_clone;
 
-    VLOG(3) << "get missed versions again when finish incremental clone. "
+    VLOG_NOTICE << "get missed versions again when finish incremental clone. "
             << "tablet=" << tablet->full_name() << ", committed_version=" << committed_version
             << ", missed_versions_size=" << missed_versions.size();
 
diff --git a/be/src/olap/txn_manager.cpp b/be/src/olap/txn_manager.cpp
index cc1956c..6349e72 100644
--- a/be/src/olap/txn_manager.cpp
+++ b/be/src/olap/txn_manager.cpp
@@ -161,7 +161,7 @@ OLAPStatus TxnManager::prepare_txn(TPartitionId partition_id, TTransactionId tra
     txn_tablet_map[key][tablet_info] = load_info;
     _insert_txn_partition_map_unlocked(transaction_id, partition_id);
 
-    VLOG(3) << "add transaction to engine successfully."
+    VLOG_NOTICE << "add transaction to engine successfully. "
             << "partition_id: " << key.first << ", transaction_id: " << key.second
             << ", tablet: " << tablet_info.to_string();
     return OLAP_SUCCESS;
@@ -387,7 +387,7 @@ OLAPStatus TxnManager::delete_txn(OlapMeta* meta, TPartitionId partition_id,
 #ifndef BE_TEST
                 StorageEngine::instance()->add_unused_rowset(load_info.rowset);
 #endif
-                VLOG(3) << "delete transaction from engine successfully."
+                VLOG_NOTICE << "delete transaction from engine successfully."
                         << " partition_id: " << key.first << ", transaction_id: " << key.second
                         << ", tablet: " << tablet_info.to_string() << ", rowset: "
                         << (load_info.rowset != nullptr ? load_info.rowset->rowset_id().to_string()
@@ -419,7 +419,7 @@ void TxnManager::get_tablet_related_txns(TTabletId tablet_id, SchemaHash schema_
             if (it.second.find(tablet_info) != it.second.end()) {
                 *partition_id = it.first.first;
                 transaction_ids->insert(it.first.second);
-                VLOG(3) << "find transaction on tablet."
+                VLOG_NOTICE << "find transaction on tablet. "
                         << "partition_id: " << it.first.first
                         << ", transaction_id: " << it.first.second
                         << ", tablet: " << tablet_info.to_string();
@@ -474,7 +474,7 @@ void TxnManager::get_txn_related_tablets(const TTransactionId transaction_id,
     txn_tablet_map_t& txn_tablet_map = _get_txn_tablet_map(transaction_id);
     auto it = txn_tablet_map.find(key);
     if (it == txn_tablet_map.end()) {
-        VLOG(3) << "could not find tablet for"
+        VLOG_NOTICE << "could not find tablet for"
                 << " partition_id=" << partition_id << ", transaction_id=" << transaction_id;
         return;
     }
@@ -524,7 +524,7 @@ void TxnManager::build_expire_txn_map(std::map<TabletInfo, std::vector<int64_t>>
                 if (diff >= config::pending_data_expire_time_sec) {
                     (*expire_txn_map)[t_map.first].push_back(txn_id);
                     if (VLOG_IS_ON(3)) {
-                        VLOG(3) << "find expired txn."
+                        VLOG_NOTICE << "find expired txn."
                                 << " tablet=" << t_map.first.to_string()
                                 << " transaction_id=" << txn_id << " exist_sec=" << diff;
                     }
diff --git a/be/src/olap/utils.cpp b/be/src/olap/utils.cpp
index 6ae1164..84e2c36 100644
--- a/be/src/olap/utils.cpp
+++ b/be/src/olap/utils.cpp
@@ -85,7 +85,7 @@ OLAPStatus olap_compress(const char* src_buf, size_t src_len, char* dest_buf, si
 
             return OLAP_ERR_COMPRESS_ERROR;
         } else if (*written_len > dest_len) {
-            VLOG(3) << "buffer overflow when compressing. "
+            VLOG_NOTICE << "buffer overflow when compressing. "
                     << "dest_len=" << dest_len << ", written_len=" << *written_len;
 
             return OLAP_ERR_BUFFER_OVERFLOW;
@@ -106,7 +106,7 @@ OLAPStatus olap_compress(const char* src_buf, size_t src_len, char* dest_buf, si
 
             return OLAP_ERR_COMPRESS_ERROR;
         } else if (*written_len > dest_len) {
-            VLOG(3) << "buffer overflow when compressing. "
+            VLOG_NOTICE << "buffer overflow when compressing. "
                     << ", dest_len=" << dest_len << ", written_len=" << *written_len;
 
             return OLAP_ERR_BUFFER_OVERFLOW;
@@ -120,7 +120,7 @@ OLAPStatus olap_compress(const char* src_buf, size_t src_len, char* dest_buf, si
         int lz4_res = LZ4_compress_default(src_buf, dest_buf, src_len, dest_len);
         *written_len = lz4_res;
         if (0 == lz4_res) {
-            VLOG(10) << "compress failed. src_len=" << src_len << ", dest_len=" << dest_len
+            VLOG_TRACE << "compress failed. src_len=" << src_len << ", dest_len=" << dest_len
                      << ", written_len=" << *written_len << ", lz4_res=" << lz4_res;
             return OLAP_ERR_BUFFER_OVERFLOW;
         }
@@ -626,7 +626,7 @@ OLAPStatus move_to_trash(const boost::filesystem::path& schema_hash_root,
     }
 
     // 3. remove file to trash
-    VLOG(3) << "move file to trash. " << old_file_path << " -> " << new_file_path;
+    VLOG_NOTICE << "move file to trash. " << old_file_path << " -> " << new_file_path;
     if (rename(old_file_path.c_str(), new_file_path.c_str()) < 0) {
         OLAP_LOG_WARNING("move file to trash failed. [file=%s target='%s' err='%m']",
                          old_file_path.c_str(), new_file_path.c_str());
@@ -708,7 +708,7 @@ COPY_EXIT:
         ::close(dest_fd);
     }
 
-    VLOG(3) << "copy file success. [src=" << src << " dest=" << dest << "]";
+    VLOG_NOTICE << "copy file success. [src=" << src << " dest=" << dest << "]";
 
     return res;
 }
@@ -777,7 +777,7 @@ OLAPStatus read_write_test_file(const string& test_file_path) {
     }
     if (remove(test_file_path.c_str()) != 0) {
         char errmsg[64];
-        VLOG(3) << "fail to delete test file. [err='" << strerror_r(errno, errmsg, 64) << "' path='"
+        VLOG_NOTICE << "fail to delete test file. [err='" << strerror_r(errno, errmsg, 64) << "' path='"
                 << test_file_path << "']";
         return OLAP_ERR_IO_ERROR;
     }
diff --git a/be/src/olap/version_graph.cpp b/be/src/olap/version_graph.cpp
index 32854cf..fb50b86 100644
--- a/be/src/olap/version_graph.cpp
+++ b/be/src/olap/version_graph.cpp
@@ -38,7 +38,7 @@ void TimestampedVersionTracker::_construct_versioned_tracker(
 void TimestampedVersionTracker::construct_versioned_tracker(
         const std::vector<RowsetMetaSharedPtr>& rs_metas) {
     if (rs_metas.empty()) {
-        VLOG(3) << "there is no version in the header.";
+        VLOG_NOTICE << "there is no version in the header.";
         return;
     }
     _stale_version_path_map.clear();
@@ -50,7 +50,7 @@ void TimestampedVersionTracker::construct_versioned_tracker(
         const std::vector<RowsetMetaSharedPtr>& rs_metas,
         const std::vector<RowsetMetaSharedPtr>& stale_metas) {
     if (rs_metas.empty()) {
-        VLOG(3) << "there is no version in the header.";
+        VLOG_NOTICE << "there is no version in the header.";
         return;
     }
     _stale_version_path_map.clear();
@@ -294,7 +294,7 @@ void TimestampedVersionTracker::add_version(const Version& version) {
 void TimestampedVersionTracker::add_stale_path_version(
         const std::vector<RowsetMetaSharedPtr>& stale_rs_metas) {
     if (stale_rs_metas.empty()) {
-        VLOG(3) << "there is no version in the stale_rs_metas.";
+        VLOG_NOTICE << "there is no version in the stale_rs_metas.";
         return;
     }
 
@@ -341,7 +341,7 @@ void TimestampedVersionTracker::capture_expired_paths(
 
 PathVersionListSharedPtr TimestampedVersionTracker::fetch_path_version_by_id(int64_t path_id) {
     if (_stale_version_path_map.count(path_id) == 0) {
-        VLOG(3) << "path version " << path_id << " does not exist!";
+        VLOG_NOTICE << "path version " << path_id << " does not exist!";
         return nullptr;
     }
 
@@ -350,7 +350,7 @@ PathVersionListSharedPtr TimestampedVersionTracker::fetch_path_version_by_id(int
 
 PathVersionListSharedPtr TimestampedVersionTracker::fetch_and_delete_path_by_id(int64_t path_id) {
     if (_stale_version_path_map.count(path_id) == 0) {
-        VLOG(3) << "path version " << path_id << " does not exist!";
+        VLOG_NOTICE << "path version " << path_id << " does not exist!";
         return nullptr;
     }
 
@@ -412,7 +412,7 @@ std::vector<TimestampedVersionSharedPtr>& TimestampedVersionPathContainer::times
 void VersionGraph::construct_version_graph(const std::vector<RowsetMetaSharedPtr>& rs_metas,
                                            int64_t* max_version) {
     if (rs_metas.empty()) {
-        VLOG(3) << "there is no version in the header.";
+        VLOG_NOTICE << "there is no version in the header.";
         return;
     }
 
@@ -521,7 +521,7 @@ OLAPStatus VersionGraph::delete_version_from_graph(const Version& version) {
 void VersionGraph::_add_vertex_to_graph(int64_t vertex_value) {
     // Vertex with vertex_value already exists.
     if (_vertex_index_map.find(vertex_value) != _vertex_index_map.end()) {
-        VLOG(3) << "vertex with vertex value already exists. value=" << vertex_value;
+        VLOG_NOTICE << "vertex with vertex value already exists. value=" << vertex_value;
         return;
     }
 
@@ -624,7 +624,7 @@ OLAPStatus VersionGraph::capture_consistent_versions(const Version& spec_version
 
             shortest_path_for_debug << (*version_path)[version_path->size() - 1] << ' ';
         }
-        VLOG(10) << "success to find path for spec_version. spec_version=" << spec_version
+        VLOG_TRACE << "success to find path for spec_version. spec_version=" << spec_version
                  << ", path=" << shortest_path_for_debug.str();
     }
 
diff --git a/be/src/runtime/etl_job_mgr.cpp b/be/src/runtime/etl_job_mgr.cpp
index 3ada19a..0ec8eb2 100644
--- a/be/src/runtime/etl_job_mgr.cpp
+++ b/be/src/runtime/etl_job_mgr.cpp
@@ -37,7 +37,7 @@
 
 namespace doris {
 
-#define VLOG_ETL VLOG(2)
+#define VLOG_ETL VLOG_CRITICAL
 
 std::string EtlJobMgr::to_http_path(const std::string& file_name) {
     std::stringstream url;
diff --git a/be/src/runtime/export_task_mgr.cpp b/be/src/runtime/export_task_mgr.cpp
index bf74bf7..2b1a59f 100644
--- a/be/src/runtime/export_task_mgr.cpp
+++ b/be/src/runtime/export_task_mgr.cpp
@@ -29,7 +29,7 @@
 
 namespace doris {
 
-#define VLOG_EXPORT VLOG(2)
+#define VLOG_EXPORT VLOG_CRITICAL
 
 static size_t LRU_MAX_CASH_TASK_NUM = 1000;
 
diff --git a/be/src/runtime/load_channel_mgr.cpp b/be/src/runtime/load_channel_mgr.cpp
index f1f5c74..c2feebb 100644
--- a/be/src/runtime/load_channel_mgr.cpp
+++ b/be/src/runtime/load_channel_mgr.cpp
@@ -158,7 +158,7 @@ Status LoadChannelMgr::add_batch(const PTabletWriterAddBatchRequest& request,
                     _last_success_channel->insert(load_id.to_string(), nullptr, 1, dummy_deleter);
             _last_success_channel->release(handle);
         }
-        VLOG(1) << "removed load channel " << load_id;
+        VLOG_CRITICAL << "removed load channel " << load_id;
     }
     return Status::OK();
 }
@@ -238,10 +238,10 @@ Status LoadChannelMgr::_start_load_channels_clean() {
     {
         std::vector<UniqueId> need_delete_channel_ids;
         std::lock_guard<std::mutex> l(_lock);
-        VLOG(1) << "there are " << _load_channels.size() << " running load channels";
+        VLOG_CRITICAL << "there are " << _load_channels.size() << " running load channels";
         int i = 0;
         for (auto& kv : _load_channels) {
-            VLOG(1) << "load channel[" << i++ << "]: " << *(kv.second);
+            VLOG_CRITICAL << "load channel[" << i++ << "]: " << *(kv.second);
             time_t last_updated_time = kv.second->last_updated_time();
             if (difftime(now, last_updated_time) >= kv.second->timeout()) {
                 need_delete_channel_ids.emplace_back(kv.first);
diff --git a/be/src/runtime/plan_fragment_executor.cpp b/be/src/runtime/plan_fragment_executor.cpp
index e29ee96..a001848 100644
--- a/be/src/runtime/plan_fragment_executor.cpp
+++ b/be/src/runtime/plan_fragment_executor.cpp
@@ -76,7 +76,7 @@ Status PlanFragmentExecutor::prepare(const TExecPlanFragmentParams& request,
     LOG(INFO) << "Prepare(): query_id=" << print_id(_query_id)
               << " fragment_instance_id=" << print_id(params.fragment_instance_id)
               << " backend_num=" << request.backend_num;
-    // VLOG(2) << "request:\n" << apache::thrift::ThriftDebugString(request);
+    // VLOG_CRITICAL << "request:\n" << apache::thrift::ThriftDebugString(request);
 
     const TQueryGlobals& query_globals =
             fragments_ctx == nullptr ? request.query_globals : fragments_ctx->query_globals;
@@ -177,8 +177,8 @@ Status PlanFragmentExecutor::prepare(const TExecPlanFragmentParams& request,
     std::vector<ExecNode*> scan_nodes;
     std::vector<TScanRangeParams> no_scan_ranges;
     _plan->collect_scan_nodes(&scan_nodes);
-    VLOG(1) << "scan_nodes.size()=" << scan_nodes.size();
-    VLOG(1) << "params.per_node_scan_ranges.size()=" << params.per_node_scan_ranges.size();
+    VLOG_CRITICAL << "scan_nodes.size()=" << scan_nodes.size();
+    VLOG_CRITICAL << "params.per_node_scan_ranges.size()=" << params.per_node_scan_ranges.size();
 
     _plan->try_do_aggregate_serde_improve();
 
@@ -187,7 +187,7 @@ Status PlanFragmentExecutor::prepare(const TExecPlanFragmentParams& request,
         const std::vector<TScanRangeParams>& scan_ranges =
                 find_with_default(params.per_node_scan_ranges, scan_node->id(), no_scan_ranges);
         scan_node->set_scan_ranges(scan_ranges);
-        VLOG(1) << "scan_node_Id=" << scan_node->id() << " size=" << scan_ranges.size();
+        VLOG_CRITICAL << "scan_node_Id=" << scan_node->id() << " size=" << scan_ranges.size();
     }
 
     _runtime_state->set_per_fragment_instance_idx(params.sender_id);
@@ -223,7 +223,7 @@ Status PlanFragmentExecutor::prepare(const TExecPlanFragmentParams& request,
     _row_batch.reset(new RowBatch(_plan->row_desc(), _runtime_state->batch_size(),
                                   _runtime_state->instance_mem_tracker().get()));
     // _row_batch->tuple_data_pool()->set_limits(*_runtime_state->mem_trackers());
-    VLOG(3) << "plan_root=\n" << _plan->debug_string();
+    VLOG_NOTICE << "plan_root=\n" << _plan->debug_string();
     _prepared = true;
 
     _query_statistics.reset(new QueryStatistics());
diff --git a/be/src/runtime/routine_load/data_consumer.cpp b/be/src/runtime/routine_load/data_consumer.cpp
index 2c773a4..157f69a 100644
--- a/be/src/runtime/routine_load/data_consumer.cpp
+++ b/be/src/runtime/routine_load/data_consumer.cpp
@@ -69,7 +69,7 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) {
             LOG(WARNING) << ss.str();
             return Status::InternalError(ss.str());
         }
-        VLOG(3) << "set " << conf_key << ": " << conf_val;
+        VLOG_NOTICE << "set " << conf_key << ": " << conf_val;
         return Status::OK();
     };
 
@@ -122,7 +122,7 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) {
         return Status::InternalError("PAUSE: failed to create kafka consumer: " + errstr);
     }
 
-    VLOG(3) << "finished to init kafka consumer. " << ctx->brief();
+    VLOG_NOTICE << "finished to init kafka consumer. " << ctx->brief();
 
     _init = true;
     return Status::OK();
diff --git a/be/src/runtime/routine_load/data_consumer.h b/be/src/runtime/routine_load/data_consumer.h
index afa4e03..f0c1c6e 100644
--- a/be/src/runtime/routine_load/data_consumer.h
+++ b/be/src/runtime/routine_load/data_consumer.h
@@ -115,7 +115,7 @@ public:
               _topic(ctx->kafka_info->topic) {}
 
     virtual ~KafkaDataConsumer() {
-        VLOG(3) << "deconstruct consumer";
+        VLOG_NOTICE << "deconstruct consumer";
         if (_k_consumer) {
             _k_consumer->close();
             delete _k_consumer;
diff --git a/be/src/runtime/routine_load/data_consumer_group.cpp b/be/src/runtime/routine_load/data_consumer_group.cpp
index 292749a..d3ffad8 100644
--- a/be/src/runtime/routine_load/data_consumer_group.cpp
+++ b/be/src/runtime/routine_load/data_consumer_group.cpp
@@ -72,7 +72,7 @@ Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) {
                     ctx->max_interval_s * 1000, [this, &result_st](const Status& st) {
                         std::unique_lock<std::mutex> lock(_mutex);
                         _counter--;
-                        VLOG(1) << "group counter is: " << _counter << ", grp: " << _grp_id;
+                        VLOG_CRITICAL << "group counter is: " << _counter << ", grp: " << _grp_id;
                         if (_counter == 0) {
                             _queue.shutdown();
                             LOG(INFO) << "all consumers are finished. shutdown queue. group id: "
@@ -86,7 +86,7 @@ Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) {
                          << ", group id: " << _grp_id;
             return Status::InternalError("failed to submit data consumer");
         } else {
-            VLOG(1) << "submit a data consumer: " << consumer->id() << ", group id: " << _grp_id;
+            VLOG_CRITICAL << "submit a data consumer: " << consumer->id() << ", group id: " << _grp_id;
         }
     }
 
@@ -162,7 +162,7 @@ Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) {
         RdKafka::Message* msg;
         bool res = _queue.blocking_get(&msg);
         if (res) {
-            VLOG(3) << "get kafka message"
+            VLOG_NOTICE << "get kafka message"
                     << ", partition: " << msg->partition() << ", offset: " << msg->offset()
                     << ", len: " << msg->len();
 
@@ -173,7 +173,7 @@ Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) {
                 left_rows--;
                 left_bytes -= msg->len();
                 cmt_offset[msg->partition()] = msg->offset();
-                VLOG(3) << "consume partition[" << msg->partition() << " - " << msg->offset()
+                VLOG_NOTICE << "consume partition[" << msg->partition() << " - " << msg->offset()
                         << "]";
             } else {
                 // failed to append this msg, we must stop
diff --git a/be/src/runtime/routine_load/data_consumer_pool.cpp b/be/src/runtime/routine_load/data_consumer_pool.cpp
index 1f75259..5b0d9fa 100644
--- a/be/src/runtime/routine_load/data_consumer_pool.cpp
+++ b/be/src/runtime/routine_load/data_consumer_pool.cpp
@@ -30,7 +30,7 @@ Status DataConsumerPool::get_consumer(StreamLoadContext* ctx, std::shared_ptr<Da
     auto iter = std::begin(_pool);
     while (iter != std::end(_pool)) {
         if ((*iter)->match(ctx)) {
-            VLOG(3) << "get an available data consumer from pool: " << (*iter)->id();
+            VLOG_NOTICE << "get an available data consumer from pool: " << (*iter)->id();
             (*iter)->reset();
             *ret = *iter;
             iter = _pool.erase(iter);
@@ -55,7 +55,7 @@ Status DataConsumerPool::get_consumer(StreamLoadContext* ctx, std::shared_ptr<Da
     // init the consumer
     RETURN_IF_ERROR(consumer->init(ctx));
 
-    VLOG(3) << "create new data consumer: " << consumer->id();
+    VLOG_NOTICE << "create new data consumer: " << consumer->id();
     *ret = consumer;
     return Status::OK();
 }
@@ -88,14 +88,14 @@ void DataConsumerPool::return_consumer(std::shared_ptr<DataConsumer> consumer) {
     std::unique_lock<std::mutex> l(_lock);
 
     if (_pool.size() == _max_pool_size) {
-        VLOG(3) << "data consumer pool is full: " << _pool.size() << "-" << _max_pool_size
+        VLOG_NOTICE << "data consumer pool is full: " << _pool.size() << "-" << _max_pool_size
                 << ", discard the returned consumer: " << consumer->id();
         return;
     }
 
     consumer->reset();
     _pool.push_back(consumer);
-    VLOG(3) << "return the data consumer: " << consumer->id()
+    VLOG_NOTICE << "return the data consumer: " << consumer->id()
             << ", current pool size: " << _pool.size();
     return;
 }
diff --git a/be/src/runtime/routine_load/routine_load_task_executor.cpp b/be/src/runtime/routine_load/routine_load_task_executor.cpp
index af4473f..74a0aa0 100644
--- a/be/src/runtime/routine_load/routine_load_task_executor.cpp
+++ b/be/src/runtime/routine_load/routine_load_task_executor.cpp
@@ -162,7 +162,7 @@ Status RoutineLoadTaskExecutor::submit_task(const TRoutineLoadTask& task) {
         return Status::InternalError("unknown load source type");
     }
 
-    VLOG(1) << "receive a new routine load task: " << ctx->brief();
+    VLOG_CRITICAL << "receive a new routine load task: " << ctx->brief();
     // register the task
     ctx->ref();
     _task_map[ctx->id] = ctx;
diff --git a/be/src/runtime/snapshot_loader.cpp b/be/src/runtime/snapshot_loader.cpp
index d347de2..ff76308 100644
--- a/be/src/runtime/snapshot_loader.cpp
+++ b/be/src/runtime/snapshot_loader.cpp
@@ -112,7 +112,7 @@ Status SnapshotLoader::upload(const std::map<std::string, std::string>& src_to_d
                 _get_existing_files_from_remote(client, dest_path, broker_prop, &remote_files));
 
         for (auto& tmp : remote_files) {
-            VLOG(2) << "get remote file: " << tmp.first << ", checksum: " << tmp.second.md5;
+            VLOG_CRITICAL << "get remote file: " << tmp.first << ", checksum: " << tmp.second.md5;
         }
 
         // 2.2 list local files
@@ -136,7 +136,7 @@ Status SnapshotLoader::upload(const std::map<std::string, std::string>& src_to_d
                 LOG(WARNING) << ss.str();
                 return Status::InternalError(ss.str());
             }
-            VLOG(2) << "get file checksum: " << local_file << ": " << md5sum;
+            VLOG_CRITICAL << "get file checksum: " << local_file << ": " << md5sum;
             local_files_with_checksum.push_back(local_file + "." + md5sum);
 
             // check if this local file need upload
@@ -155,7 +155,7 @@ Status SnapshotLoader::upload(const std::map<std::string, std::string>& src_to_d
             }
 
             if (!need_upload) {
-                VLOG(2) << "file exist in remote path, no need to upload: " << local_file;
+                VLOG_CRITICAL << "file exists in remote path, no need to upload: " << local_file;
                 continue;
             }
 
@@ -275,7 +275,7 @@ Status SnapshotLoader::download(const std::map<std::string, std::string>& src_to
 
         int64_t remote_tablet_id;
         RETURN_IF_ERROR(_get_tablet_id_from_remote_path(remote_path, &remote_tablet_id));
-        VLOG(2) << "get local tablet id: " << local_tablet_id << ", schema hash: " << schema_hash
+        VLOG_CRITICAL << "get local tablet id: " << local_tablet_id << ", schema hash: " << schema_hash
                 << ", remote tablet id: " << remote_tablet_id;
 
         // 1. get local files
@@ -327,7 +327,7 @@ Status SnapshotLoader::download(const std::map<std::string, std::string>& src_to
                                      << ". msg: " << st.get_error_msg() << ". download it";
                         need_download = true;
                     } else {
-                        VLOG(2) << "get local file checksum: " << remote_file << ": "
+                        VLOG_CRITICAL << "get local file checksum: " << remote_file << ": "
                                 << local_md5sum;
                         if (file_stat.md5 != local_md5sum) {
                             // file's checksum does not equal, download it.
@@ -366,7 +366,7 @@ Status SnapshotLoader::download(const std::map<std::string, std::string>& src_to
 
                 // 2. remove the existing local file if exist
                 if (boost::filesystem::remove(full_local_file)) {
-                    VLOG(2) << "remove the previously exist local file: " << full_local_file;
+                    VLOG_CRITICAL << "remove the previously existing local file: " << full_local_file;
                 }
                 // remove file which will be downloaded now.
                 // this file will be added to local_files if it be downloaded successfully.
@@ -381,7 +381,7 @@ Status SnapshotLoader::download(const std::map<std::string, std::string>& src_to
                 }
 
                 // 4. read remote and write to local
-                VLOG(2) << "read remote file: " << full_remote_file
+                VLOG_CRITICAL << "read remote file: " << full_remote_file
                         << " to local: " << full_local_file << ". file size: " << file_len;
                 constexpr size_t buf_sz = 1024 * 1024;
                 char read_buf[buf_sz];
@@ -417,7 +417,7 @@ Status SnapshotLoader::download(const std::map<std::string, std::string>& src_to
                 LOG(WARNING) << ss.str();
                 return Status::InternalError(ss.str());
             }
-            VLOG(2) << "get downloaded file checksum: " << full_local_file << ": "
+            VLOG_CRITICAL << "get downloaded file checksum: " << full_local_file << ": "
                     << downloaded_md5sum;
             if (downloaded_md5sum != file_stat.md5) {
                 std::stringstream ss;
@@ -444,7 +444,7 @@ Status SnapshotLoader::download(const std::map<std::string, std::string>& src_to
                              << st.get_error_msg() << ". ignore it";
                 continue;
             }
-            VLOG(2) << "new file name after replace tablet id: " << new_name;
+            VLOG_CRITICAL << "new file name after replace tablet id: " << new_name;
             const auto& find = remote_files.find(new_name);
             if (find != remote_files.end()) {
                 continue;
@@ -452,7 +452,7 @@ Status SnapshotLoader::download(const std::map<std::string, std::string>& src_to
 
             // delete
             std::string full_local_file = local_path + "/" + local_file;
-            VLOG(2) << "begin to delete local snapshot file: " << full_local_file
+            VLOG_CRITICAL << "begin to delete local snapshot file: " << full_local_file
                     << ", it does not exist in remote";
             if (remove(full_local_file.c_str()) != 0) {
                 LOG(WARNING) << "failed to delete unknown local file: " << full_local_file
@@ -546,9 +546,9 @@ Status SnapshotLoader::move(const std::string& snapshot_path, TabletSharedPtr ta
             // tablet id and schema hash from this path, which
             // means this path is a valid path.
             boost::filesystem::remove_all(tablet_dir);
-            VLOG(2) << "remove dir: " << tablet_dir;
+            VLOG_CRITICAL << "remove dir: " << tablet_dir;
             boost::filesystem::create_directory(tablet_dir);
-            VLOG(2) << "re-create dir: " << tablet_dir;
+            VLOG_CRITICAL << "re-create dir: " << tablet_dir;
         } catch (const boost::filesystem::filesystem_error& e) {
             std::stringstream ss;
             ss << "failed to move tablet path: " << tablet_path << ". err: " << e.what();
@@ -574,7 +574,7 @@ Status SnapshotLoader::move(const std::string& snapshot_path, TabletSharedPtr ta
                 return Status::InternalError("move tablet failed");
             }
             linked_files.push_back(full_dest_path);
-            VLOG(2) << "link file from " << full_src_path << " to " << full_dest_path;
+            VLOG_CRITICAL << "link file from " << full_src_path << " to " << full_dest_path;
         }
 
     } else {
@@ -631,7 +631,7 @@ Status SnapshotLoader::_get_tablet_id_and_schema_hash_from_file_path(const std::
     ss2 << tablet_str;
     ss2 >> *tablet_id;
 
-    VLOG(2) << "get tablet id " << *tablet_id << ", schema hash: " << *schema_hash
+    VLOG_CRITICAL << "get tablet id " << *tablet_id << ", schema hash: " << *schema_hash
             << " from path: " << src_path;
     return Status::OK();
 }
@@ -706,7 +706,7 @@ Status SnapshotLoader::_get_existing_files_from_remote(
             FileStat stat = {std::string(file_name, 0, pos), std::string(file_name, pos + 1),
                              file.size};
             files->emplace(std::string(file_name, 0, pos), stat);
-            VLOG(2) << "split remote file: " << std::string(file_name, 0, pos)
+            VLOG_CRITICAL << "split remote file: " << std::string(file_name, 0, pos)
                     << ", checksum: " << std::string(file_name, pos + 1);
         }
 
@@ -791,7 +791,7 @@ void SnapshotLoader::_assemble_file_name(const std::string& snapshot_path,
         << vesion_hash << "_" << seg_num << suffix;
     *tablet_file = ss2.str();
 
-    VLOG(2) << "assemble file name: " << *snapshot_file << ", " << *tablet_file;
+    VLOG_CRITICAL << "assemble file name: " << *snapshot_file << ", " << *tablet_file;
 }
 
 Status SnapshotLoader::_replace_tablet_id(const std::string& file_name, int64_t tablet_id,
diff --git a/be/src/runtime/stream_load/load_stream_mgr.h b/be/src/runtime/stream_load/load_stream_mgr.h
index 5700416..433fe9f 100644
--- a/be/src/runtime/stream_load/load_stream_mgr.h
+++ b/be/src/runtime/stream_load/load_stream_mgr.h
@@ -40,7 +40,7 @@ public:
             return Status::InternalError("id already exist");
         }
         _stream_map.emplace(id, stream);
-        VLOG(3) << "put stream load pipe: " << id;
+        VLOG_NOTICE << "put stream load pipe: " << id;
         return Status::OK();
     }
 
@@ -60,7 +60,7 @@ public:
         auto it = _stream_map.find(id);
         if (it != std::end(_stream_map)) {
             _stream_map.erase(it);
-            VLOG(3) << "remove stream load pipe: " << id;
+            VLOG_NOTICE << "remove stream load pipe: " << id;
         }
         return;
     }
diff --git a/be/src/runtime/tablets_channel.cpp b/be/src/runtime/tablets_channel.cpp
index 71cf0af..76c7166 100644
--- a/be/src/runtime/tablets_channel.cpp
+++ b/be/src/runtime/tablets_channel.cpp
@@ -198,7 +198,7 @@ Status TabletsChannel::reduce_mem_usage() {
         return Status::OK();
     }
 
-    VLOG(3) << "pick the delte writer to flush, with mem consumption: " << max_consume
+    VLOG_NOTICE << "pick the delete writer to flush, with mem consumption: " << max_consume
             << ", channel key: " << _key;
     OLAPStatus st = writer->flush_memtable_and_wait();
     if (st != OLAP_SUCCESS) {
diff --git a/be/src/service/backend_options.cpp b/be/src/service/backend_options.cpp
index 67a216d..a0b03e3 100644
--- a/be/src/service/backend_options.cpp
+++ b/be/src/service/backend_options.cpp
@@ -54,7 +54,7 @@ bool BackendOptions::init() {
     std::vector<InetAddress>::iterator addr_it = hosts.begin();
     for (; addr_it != hosts.end(); ++addr_it) {
         if ((*addr_it).is_address_v4()) {
-            VLOG(2) << "check ip=" << addr_it->get_host_address_v4();
+            VLOG_CRITICAL << "check ip=" << addr_it->get_host_address_v4();
             if ((*addr_it).is_loopback_v4()) {
                 loopback = addr_it->get_host_address_v4();
             } else if (!_s_priority_cidrs.empty()) {
diff --git a/be/src/util/cidr.cpp b/be/src/util/cidr.cpp
index 64b89a3..9351d7a 100644
--- a/be/src/util/cidr.cpp
+++ b/be/src/util/cidr.cpp
@@ -40,7 +40,7 @@ bool CIDR::reset(const std::string& cidr_str) {
     if (have_mask == -1) {
         cidr_format_str.assign(cidr_str + "/32");
     }
-    VLOG(2) << "cidr format str: " << cidr_format_str;
+    VLOG_CRITICAL << "cidr format str: " << cidr_format_str;
 
     std::vector<std::string> cidr_items = strings::Split(cidr_format_str, "/");
     if (cidr_items.size() != 2) {
diff --git a/be/src/util/mutex.cpp b/be/src/util/mutex.cpp
index c793915..d4efe0f 100644
--- a/be/src/util/mutex.cpp
+++ b/be/src/util/mutex.cpp
@@ -111,7 +111,7 @@ OLAPStatus Mutex::lock() {
 OLAPStatus Mutex::trylock() {
     int rv = pthread_mutex_trylock(&_lock);
     if (rv != 0) {
-        VLOG(3) << "failed to got the mutex lock. error=" << strerror(rv);
+        VLOG_NOTICE << "failed to get the mutex lock. error=" << strerror(rv);
         return OLAP_ERR_RWLOCK_ERROR;
     }
     return OLAP_SUCCESS;
@@ -159,7 +159,7 @@ OLAPStatus RWMutex::rdlock() {
 
 OLAPStatus RWMutex::tryrdlock() {
     if (0 != pthread_rwlock_tryrdlock(&_lock)) {
-        VLOG(3) << "failed to got the rwlock rdlock. err=" << strerror(errno);
+        VLOG_NOTICE << "failed to get the rwlock rdlock. err=" << strerror(errno);
         return OLAP_ERR_RWLOCK_ERROR;
     }
 
@@ -168,7 +168,7 @@ OLAPStatus RWMutex::tryrdlock() {
 
 OLAPStatus RWMutex::trywrlock() {
     if (0 != pthread_rwlock_trywrlock(&_lock)) {
-        VLOG(3) << "failed to got the rwlock rdlock. err=" << strerror(errno);
+        VLOG_NOTICE << "failed to get the rwlock wrlock. err=" << strerror(errno);
         return OLAP_ERR_RWLOCK_ERROR;
     }
 
diff --git a/be/src/util/progress_updater.cpp b/be/src/util/progress_updater.cpp
index c88504e..ebcdbc0 100644
--- a/be/src/util/progress_updater.cpp
+++ b/be/src/util/progress_updater.cpp
@@ -23,15 +23,13 @@ namespace doris {
 
 ProgressUpdater::ProgressUpdater(const std::string& label, int64_t total, int period)
         : _label(label),
-          _logging_level(2),
           _total(total),
           _update_period(period),
           _num_complete(0),
           _last_output_percentage(0) {}
 
 ProgressUpdater::ProgressUpdater()
-        : _logging_level(2),
-          _total(0),
+        : _total(0),
           _update_period(0),
           _num_complete(0),
           _last_output_percentage(0) {}
@@ -52,7 +50,7 @@ void ProgressUpdater::update(int64_t delta) {
 
     if (num_complete >= _total) {
         // Always print the final 100% complete
-        VLOG(_logging_level) << _label << " 100\% Complete (" << num_complete << " out of "
+        VLOG_DEBUG << _label << " 100% Complete (" << num_complete << " out of "
                              << _total << ")";
         return;
     }
@@ -63,7 +61,7 @@ void ProgressUpdater::update(int64_t delta) {
     if (new_percentage - old_percentage > _update_period) {
         // Only update shared variable if this guy was the latest.
         __sync_val_compare_and_swap(&_last_output_percentage, old_percentage, new_percentage);
-        VLOG(_logging_level) << _label << ": " << new_percentage << "\% Complete (" << num_complete
+        VLOG_DEBUG << _label << ": " << new_percentage << "% Complete (" << num_complete
                              << " out of " << _total << ")";
     }
 }
diff --git a/be/src/util/progress_updater.h b/be/src/util/progress_updater.h
index 15c211e..a47135d 100644
--- a/be/src/util/progress_updater.h
+++ b/be/src/util/progress_updater.h
@@ -40,10 +40,6 @@ public:
 
     ProgressUpdater();
 
-    // Sets the GLOG level for this progress updater.  By default, this will use
-    // 2 but objects can override it.
-    void set_logging_level(int level) { _logging_level = level; }
-
     // 'delta' more of the work has been complete.  Will potentially output to
     // VLOG_PROGRESS
     void update(int64_t delta);
@@ -56,7 +52,6 @@ public:
 
 private:
     std::string _label;
-    int _logging_level;
     int64_t _total;
     int _update_period;
     int64_t _num_complete;
diff --git a/be/src/util/tdigest.h b/be/src/util/tdigest.h
index 92ce22b..09a188e 100644
--- a/be/src/util/tdigest.h
+++ b/be/src/util/tdigest.h
@@ -279,15 +279,15 @@ public:
 
     // return the cdf on the processed values
     Value cdfProcessed(Value x) const {
-        VLOG(1) << "cdf value " << x;
-        VLOG(1) << "processed size " << _processed.size();
+        VLOG_CRITICAL << "cdf value " << x;
+        VLOG_CRITICAL << "processed size " << _processed.size();
         if (_processed.size() == 0) {
             // no data to examine
-            VLOG(1) << "no processed values";
+            VLOG_CRITICAL << "no processed values";
 
             return 0.0;
         } else if (_processed.size() == 1) {
-            VLOG(1) << "one processed value "
+            VLOG_CRITICAL << "one processed value "
                     << " _min " << _min << " _max " << _max;
             // exactly one centroid, should have _max==_min
             auto width = _max - _min;
@@ -305,20 +305,20 @@ public:
         } else {
             auto n = _processed.size();
             if (x <= _min) {
-                VLOG(1) << "below _min "
+                VLOG_CRITICAL << "below _min "
                         << " _min " << _min << " x " << x;
                 return 0;
             }
 
             if (x >= _max) {
-                VLOG(1) << "above _max "
+                VLOG_CRITICAL << "above _max "
                         << " _max " << _max << " x " << x;
                 return 1;
             }
 
             // check for the left tail
             if (x <= mean(0)) {
-                VLOG(1) << "left tail "
+                VLOG_CRITICAL << "left tail "
                         << " _min " << _min << " mean(0) " << mean(0) << " x " << x;
 
                 // note that this is different than mean(0) > _min ... this guarantees interpolation works
@@ -331,7 +331,7 @@ public:
 
             // and the right tail
             if (x >= mean(n - 1)) {
-                VLOG(1) << "right tail"
+                VLOG_CRITICAL << "right tail"
                         << " _max " << _max << " mean(n - 1) " << mean(n - 1) << " x " << x;
 
                 if (_max - mean(n - 1) > 0) {
@@ -351,7 +351,7 @@ public:
             auto z2 = (iter)->mean() - x;
             DCHECK_LE(0.0, z1);
             DCHECK_LE(0.0, z2);
-            VLOG(1) << "middle "
+            VLOG_CRITICAL << "middle "
                     << " z1 " << z1 << " z2 " << z2 << " x " << x;
 
             return weightedAverage(_cumulative[i - 1], z2, _cumulative[i], z1) / _processed_weight;
@@ -368,7 +368,7 @@ public:
     // the value will not represent the unprocessed values
     Value quantileProcessed(Value q) const {
         if (q < 0 || q > 1) {
-            VLOG(1) << "q should be in [0,1], got " << q;
+            VLOG_CRITICAL << "q should be in [0,1], got " << q;
             return NAN;
         }
 
@@ -399,7 +399,7 @@ public:
             auto i = std::distance(_cumulative.cbegin(), iter);
             auto z1 = index - *(iter - 1);
             auto z2 = *(iter)-index;
-            // VLOG(1) << "z2 " << z2 << " index " << index << " z1 " << z1;
+            // VLOG_CRITICAL << "z2 " << z2 << " index " << index << " z1 " << z1;
             return weightedAverage(mean(i - 1), z2, mean(i), z1);
         }
 
@@ -597,7 +597,7 @@ private:
         }
 
         std::vector<Centroid> sorted;
-        VLOG(1) << "total " << total;
+        VLOG_CRITICAL << "total " << total;
         sorted.reserve(total);
 
         while (!pq.empty()) {
@@ -667,9 +667,9 @@ private:
         }
         _unprocessed.clear();
         _min = std::min(_min, _processed[0].mean());
-        VLOG(1) << "new _min " << _min;
+        VLOG_CRITICAL << "new _min " << _min;
         _max = std::max(_max, (_processed.cend() - 1)->mean());
-        VLOG(1) << "new _max " << _max;
+        VLOG_CRITICAL << "new _max " << _max;
         updateCumulative();
     }
 
@@ -684,12 +684,12 @@ private:
             auto dq = w / total;
             auto k2 = integratedLocation(q + dq);
             if (k2 - k1 > 1 && w != 1) {
-                VLOG(1) << "Oversize centroid at " << std::distance(sorted.cbegin(), iter) << " k1 "
+                VLOG_CRITICAL << "Oversize centroid at " << std::distance(sorted.cbegin(), iter) << " k1 "
                         << k1 << " k2 " << k2 << " dk " << (k2 - k1) << " w " << w << " q " << q;
                 badWeight++;
             }
             if (k2 - k1 > 1.5 && w != 1) {
-                VLOG(1) << "Egregiously Oversize centroid at "
+                VLOG_CRITICAL << "Egregiously Oversize centroid at "
                         << std::distance(sorted.cbegin(), iter) << " k1 " << k1 << " k2 " << k2
                         << " dk " << (k2 - k1) << " w " << w << " q " << q;
                 badWeight++;
diff --git a/be/src/util/thread.cpp b/be/src/util/thread.cpp
index 3c7a45b..acf8268 100644
--- a/be/src/util/thread.cpp
+++ b/be/src/util/thread.cpp
@@ -349,7 +349,7 @@ Status Thread::start_thread(const std::string& category, const std::string& name
     t->_joinable = true;
     cleanup.cancel();
 
-    VLOG(3) << "Started thread " << t->tid() << " - " << category << ":" << name;
+    VLOG_NOTICE << "Started thread " << t->tid() << " - " << category << ":" << name;
     return Status::OK();
 }
 
@@ -400,7 +400,7 @@ void Thread::finish_thread(void* arg) {
     // Signal any Joiner that we're done.
     t->_done.count_down();
 
-    VLOG(2) << "Ended thread " << t->_tid << " - " << t->category() << ":" << t->name();
+    VLOG_CRITICAL << "Ended thread " << t->_tid << " - " << t->category() << ":" << t->name();
     t->Release();
     // NOTE: the above 'Release' call could be the last reference to 'this',
     // so 'this' could be destructed at this point. Do not add any code
diff --git a/be/src/util/threadpool.cpp b/be/src/util/threadpool.cpp
index afe6f14..f2a0f18 100644
--- a/be/src/util/threadpool.cpp
+++ b/be/src/util/threadpool.cpp
@@ -494,7 +494,7 @@ void ThreadPool::dispatch_thread() {
     while (true) {
         // Note: Status::Aborted() is used to indicate normal shutdown.
         if (!_pool_status.ok()) {
-            VLOG(2) << "DispatchThread exiting: " << _pool_status.to_string();
+            VLOG_CRITICAL << "DispatchThread exiting: " << _pool_status.to_string();
             break;
         }
 
@@ -522,7 +522,7 @@ void ThreadPool::dispatch_thread() {
                     // protecting the state, signal, and release again before we get the mutex. So,
                     // we'll recheck the empty queue case regardless.
                     if (_queue.empty()) {
-                        VLOG(3) << "Releasing worker thread from pool " << _name << " after "
+                        VLOG_NOTICE << "Releasing worker thread from pool " << _name << " after "
                                 << _idle_timeout.ToMilliseconds() << "ms of idle time.";
                         break;
                     }
diff --git a/be/src/util/thrift_client.cpp b/be/src/util/thrift_client.cpp
index 2a92531..1ebaca1 100644
--- a/be/src/util/thrift_client.cpp
+++ b/be/src/util/thrift_client.cpp
@@ -35,7 +35,7 @@ Status ThriftClientImpl::open() {
         try {
             _transport->close();
         } catch (const apache::thrift::transport::TTransportException& e) {
-            VLOG(1) << "Error closing socket to: " << ipaddress() << ":" << port() << ", ignoring ("
+            VLOG_CRITICAL << "Error closing socket to: " << ipaddress() << ":" << port() << ", ignoring ("
                     << e.what() << ")";
         }
         // In certain cases in which the remote host is overloaded, this failure can
@@ -43,7 +43,7 @@ Status ThriftClientImpl::open() {
         // trace as there aren't many callers of this function.
         const std::string& err_msg = strings::Substitute("Couldn't open transport for $0:$1 ($2)",
                                                          ipaddress(), port(), e.what());
-        VLOG(1) << err_msg;
+        VLOG_CRITICAL << err_msg;
         return Status::ThriftRpcError(err_msg);
     }
     return Status::OK();
diff --git a/be/src/util/thrift_util.h b/be/src/util/thrift_util.h
index 5ee80b6..9bdfd40 100644
--- a/be/src/util/thrift_util.h
+++ b/be/src/util/thrift_util.h
@@ -153,7 +153,7 @@ Status deserialize_thrift_msg(const uint8_t* buf, uint32_t* len, bool compact,
     return Status::OK();
 }
 
-// Redirects all Thrift logging to VLOG(1)
+// Redirects all Thrift logging to VLOG_CRITICAL
 void init_thrift_logging();
 
 // Wait for a server that is running locally to start accepting
diff --git a/be/test/exec/new_olap_scan_node_test.cpp b/be/test/exec/new_olap_scan_node_test.cpp
index bd672e5..1309620 100644
--- a/be/test/exec/new_olap_scan_node_test.cpp
+++ b/be/test/exec/new_olap_scan_node_test.cpp
@@ -264,7 +264,7 @@ public:
                 str << row[18] << " ";
                 str << row[20] << ")]";
                 data->push_back(str.str());
-                VLOG(3) << "Read Row: " << str.str();
+                VLOG_NOTICE << "Read Row: " << str.str();
             }
         }
     }
@@ -296,7 +296,7 @@ TEST_F(TestOlapScanNode, SimpleTest) {
         row_batch.reset();
         status = scan_node.get_next(&_runtime_stat, &row_batch, &eos);
         ASSERT_TRUE(status.ok());
-        VLOG(1) << "num_rows: " << row_batch.num_rows();
+        VLOG_CRITICAL << "num_rows: " << row_batch.num_rows();
         num_rows += row_batch.num_rows();
     }
 
@@ -329,7 +329,7 @@ TEST_F(TestOlapScanNode, MultiColumnSingleVersionTest) {
 
         for (int i = 0; i < row_batch.num_rows(); ++i) {
             TupleRow* row = row_batch.get_row(i);
-            VLOG(3) << "input row: " << print_row(row, scan_node._row_descriptor);
+            VLOG_NOTICE << "input row: " << print_row(row, scan_node._row_descriptor);
             ASSERT_LT(data_index, data.size());
             ASSERT_EQ(data[data_index], print_row(row, scan_node._row_descriptor));
             ++data_index;
@@ -367,7 +367,7 @@ TEST_F(TestOlapScanNode, MultiColumnMultiVersionTest) {
 
         for (int i = 0; i < row_batch.num_rows(); ++i) {
             TupleRow* row = row_batch.get_row(i);
-            VLOG(3) << "input row: " << print_row(row, scan_node._row_descriptor);
+            VLOG_NOTICE << "input row: " << print_row(row, scan_node._row_descriptor);
             ASSERT_LT(data_index, data.size());
             ASSERT_EQ(data[data_index], print_row(row, scan_node._row_descriptor));
             ++data_index;
diff --git a/be/test/exprs/binary_predicate_test.cpp b/be/test/exprs/binary_predicate_test.cpp
index 5186505..43342b9 100644
--- a/be/test/exprs/binary_predicate_test.cpp
+++ b/be/test/exprs/binary_predicate_test.cpp
@@ -223,7 +223,7 @@ TEST_F(BinaryOpTest, SimplePerformanceTest) {
         }
 
         uint64_t vec_time = stopwatch.elapsed_time();
-        VLOG(1) << PrettyPrinter::print(vec_time, TCounterType::TIME_NS);
+        VLOG_CRITICAL << PrettyPrinter::print(vec_time, TCounterType::TIME_NS);
 
         stopwatch.start();
 
@@ -234,9 +234,9 @@ TEST_F(BinaryOpTest, SimplePerformanceTest) {
         }
 
         uint64_t row_time = stopwatch.elapsed_time();
-        VLOG(1) << PrettyPrinter::print(row_time, TCounterType::TIME_NS);
+        VLOG_CRITICAL << PrettyPrinter::print(row_time, TCounterType::TIME_NS);
 
-        VLOG(1) << "capacity: " << capacity << " multiple: " << row_time / vec_time;
+        VLOG_CRITICAL << "capacity: " << capacity << " multiple: " << row_time / vec_time;
     }
 }
 
diff --git a/be/test/exprs/in_op_test.cpp b/be/test/exprs/in_op_test.cpp
index 67fb179..ebdb0d7 100644
--- a/be/test/exprs/in_op_test.cpp
+++ b/be/test/exprs/in_op_test.cpp
@@ -212,7 +212,7 @@ TEST_F(InOpTest, SimplePerformanceTest) {
         }
 
         uint64_t vec_time = stopwatch.elapsed_time();
-        VLOG(1) << PrettyPrinter::print(vec_time, TCounterType::TIME_NS);
+        VLOG_CRITICAL << PrettyPrinter::print(vec_time, TCounterType::TIME_NS);
 
         stopwatch.start();
 
@@ -223,9 +223,9 @@ TEST_F(InOpTest, SimplePerformanceTest) {
         }
 
         uint64_t row_time = stopwatch.elapsed_time();
-        VLOG(1) << PrettyPrinter::print(row_time, TCounterType::TIME_NS);
+        VLOG_CRITICAL << PrettyPrinter::print(row_time, TCounterType::TIME_NS);
 
-        VLOG(1) << "capacity: " << capacity << " multiple: " << row_time / vec_time;
+        VLOG_CRITICAL << "capacity: " << capacity << " multiple: " << row_time / vec_time;
     }
 }
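
For reference when reading the hunks above, the renames follow a fixed
substitution: VLOG(1) and VLOG(2) calls become VLOG_CRITICAL, VLOG(3)
becomes VLOG_NOTICE, VLOG(10) becomes VLOG_TRACE, and the formerly
configurable level in progress_updater becomes VLOG_DEBUG. The
authoritative macro definitions are added to be/src/common/logging.h
earlier in this patch; the sketch below only reconstructs plausible
definitions from the substitutions visible here, so the exact numeric
levels (VLOG_DEBUG in particular) are assumptions rather than a quote
of that hunk.

    // Plausible named-level wrappers over glog's VLOG(n), inferred from
    // the substitutions in the hunks above. The numeric levels are
    // assumptions; the real values live in be/src/common/logging.h.
    #include <glog/logging.h>

    #define VLOG_CRITICAL VLOG(1)  // rare, important events (was VLOG(1)/VLOG(2))
    #define VLOG_NOTICE VLOG(3)    // routine progress messages (was VLOG(3))
    #define VLOG_DEBUG VLOG(7)     // debug-level detail (was the configurable level)
    #define VLOG_TRACE VLOG(10)    // very verbose tracing (was VLOG(10))

Because these are thin wrappers, a statement such as
VLOG_NOTICE << "..." still compiles down to VLOG(3) and is only
evaluated when the backend runs with --v=3 or higher (or the file is
matched by --vmodule), which is why guards like if (VLOG_IS_ON(3)) in
the hunks above remain correct after the rename.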
 

