Posted to commits@kudu.apache.org by aw...@apache.org on 2019/02/20 23:20:02 UTC
[kudu] 01/02: [tablet] flush_threshold_{mb,secs} are run-time flags
This is an automated email from the ASF dual-hosted git repository.
awong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git
commit 87084f44d0a91fa0380a4cfa81c2dc7897efef01
Author: Alexey Serbin <al...@apache.org>
AuthorDate: Wed Feb 20 12:00:53 2019 -0800
[tablet] flush_threshold_{mb,secs} are run-time flags
The way the --flush_threshold_mb and --flush_threshold_secs flags are
used in the code suggests they can effectively already be changed at
runtime. This patch marks them accordingly.
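For context, here is a minimal sketch (not part of this patch; it assumes
the gflags library and Kudu's kudu/util/flag_tags.h TAG_FLAG macro) of the
pattern the runtime tag describes: the flag is re-read from its FLAGS_
variable on each call, so a value updated while the server is running takes
effect on the next evaluation.

    #include <gflags/gflags.h>
    #include "kudu/util/flag_tags.h"

    DEFINE_int32(flush_threshold_mb, 1024, "Flush threshold in MB.");
    TAG_FLAG(flush_threshold_mb, runtime);  // safe to change while the server runs

    // Hypothetical helper, for illustration only: because the flag is read on
    // every call, a runtime update (e.g. via gflags' SetCommandLineOption())
    // is picked up the next time this function runs.
    bool OverFlushThreshold(double anchored_mb) {
      return anchored_mb > FLAGS_flush_threshold_mb;
    }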
Change-Id: Ib970c1fdb75f3df09c4c69b0f51d9955274f266b
Reviewed-on: http://gerrit.cloudera.org:8080/12537
Reviewed-by: Adar Dembo <ad...@cloudera.com>
Tested-by: Kudu Jenkins
---
src/kudu/tablet/tablet_replica_mm_ops.cc | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/src/kudu/tablet/tablet_replica_mm_ops.cc b/src/kudu/tablet/tablet_replica_mm_ops.cc
index b16fae3..a7267de 100644
--- a/src/kudu/tablet/tablet_replica_mm_ops.cc
+++ b/src/kudu/tablet/tablet_replica_mm_ops.cc
@@ -62,11 +62,13 @@ DEFINE_int32(flush_threshold_mb, 1024,
"A MRS can still flush below this threshold if it if hasn't flushed in a while, "
"or if the server-wide memory limit has been reached.");
TAG_FLAG(flush_threshold_mb, experimental);
+TAG_FLAG(flush_threshold_mb, runtime);
DEFINE_int32(flush_threshold_secs, 2 * 60,
"Number of seconds after which a non-empty MemRowSet will become flushable "
"even if it is not large.");
TAG_FLAG(flush_threshold_secs, experimental);
+TAG_FLAG(flush_threshold_secs, runtime);
METRIC_DEFINE_gauge_uint32(tablet, log_gc_running,
@@ -94,14 +96,15 @@ const double kFlushUpperBoundMs = 60 * 60 * 1000;
void FlushOpPerfImprovementPolicy::SetPerfImprovementForFlush(MaintenanceOpStats* stats,
double elapsed_ms) {
double anchored_mb = static_cast<double>(stats->ram_anchored()) / (1024 * 1024);
- if (anchored_mb > FLAGS_flush_threshold_mb) {
+ const double threshold_mb = FLAGS_flush_threshold_mb;
+ if (anchored_mb > threshold_mb) {
// If we're over the user-specified flush threshold, then consider the perf
// improvement to be 1 for every extra MB. This produces perf_improvement results
// which are much higher than most compactions would produce, and means that, when
// there is an MRS over threshold, a flush will almost always be selected instead of
// a compaction. That's not necessarily a good thing, but in the absence of better
// heuristics, it will do for now.
- double extra_mb = anchored_mb - static_cast<double>(FLAGS_flush_threshold_mb);
+ double extra_mb = anchored_mb - threshold_mb;
DCHECK_GE(extra_mb, 0);
stats->set_perf_improvement(extra_mb);
} else if (elapsed_ms > FLAGS_flush_threshold_secs * 1000) {
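The local snapshot introduced in this hunk is worth calling out: once the flag
is tagged runtime, reading FLAGS_flush_threshold_mb twice within one call
could observe two different values and make extra_mb negative. Below is a
minimal sketch of that idea (OverThresholdImprovement is a hypothetical name
for illustration, not Kudu's API):

    #include <gflags/gflags.h>

    DEFINE_int32(flush_threshold_mb, 1024, "Flush threshold in MB.");

    // Snapshot the runtime flag once so the comparison and the
    // perf-improvement arithmetic see the same value, keeping the result
    // non-negative even if the flag is changed concurrently.
    double OverThresholdImprovement(double anchored_mb) {
      const double threshold_mb = FLAGS_flush_threshold_mb;  // single read per call
      return anchored_mb > threshold_mb ? anchored_mb - threshold_mb : 0.0;
    }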