Posted to commits@impala.apache.org by ta...@apache.org on 2019/02/10 20:24:53 UTC

[impala] 07/09: IMPALA-5043: diagnostics for topic staleness in AC

This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit c3c69ae362ae06adcf304b8610221a656e7b2eba
Author: Tim Armstrong <ta...@cloudera.com>
AuthorDate: Thu Feb 7 16:50:54 2019 -0800

    IMPALA-5043: diagnostics for topic staleness in AC
    
    The default threshold for the admission control topic to be considered
    stale is 5s.
    
    Adds diagnostics for stale topic updates:
    * A banner on the /admission web UI if the topic is considered stale.
    * The time since the last update on the /admission web UI.
    * Append a warning to rejection/queuing messages where topic staleness
      may have affected the decision.
    * Append a warning to profiles of admitted queries where the topic was
      stale at the time the query was admitted.
    * Include the topic staleness in all profiles of admitted queries.
    
    Testing:
    Add a custom cluster test that kills the statestore and validates that
    admission control behaves as expected and that staleness warnings
    show up in the appropriate places.
    
    Change-Id: Ib9e26adb6419589ccf7625e423356df45bee4ac9
    Reviewed-on: http://gerrit.cloudera.org:8080/12407
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/scheduling/admission-controller.cc         | 91 ++++++++++++++++++-----
 be/src/scheduling/admission-controller.h          | 19 ++++-
 be/src/service/impala-http-handler.cc             | 20 +++--
 tests/common/impala_test_suite.py                 | 17 ++++-
 tests/custom_cluster/test_admission_controller.py | 79 +++++++++++++++++++-
 www/admission_controller.tmpl                     | 16 +++-
 6 files changed, 208 insertions(+), 34 deletions(-)
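
The diagnostics described above all hang off one check: how long it has been
since the admission control topic was last updated. A minimal standalone
sketch of that logic follows (std::chrono and plain string concatenation
stand in for Impala's MonotonicMillis() and strings::Substitute(); the
function name is illustrative, the real implementation is
AdmissionController::GetStalenessDetailLocked() in the diff below):

    #include <chrono>
    #include <cstdint>
    #include <string>

    // Returns a warning string if no topic update has arrived within
    // 'threshold_ms' of the current time, or an empty string if the topic
    // is fresh. 'last_update_ms' == 0 means no update was ever received.
    std::string StalenessDetail(int64_t last_update_ms, int64_t threshold_ms,
                                const std::string& prefix) {
      const int64_t now_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
          std::chrono::steady_clock::now().time_since_epoch()).count();
      if (last_update_ms == 0) {
        return prefix + "Warning: admission control information from statestore "
            "is stale: no update has been received.";
      }
      const int64_t ms_since_update = now_ms - last_update_ms;
      if (ms_since_update >= threshold_ms) {
        return prefix + "Warning: admission control information from statestore "
            "is stale: " + std::to_string(ms_since_update) +
            "ms since last update was received.";
      }
      return "";  // Fresh: callers append nothing to their messages.
    }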

diff --git a/be/src/scheduling/admission-controller.cc b/be/src/scheduling/admission-controller.cc
index 77e6d4b..ace09e5 100644
--- a/be/src/scheduling/admission-controller.cc
+++ b/be/src/scheduling/admission-controller.cc
@@ -39,6 +39,18 @@ using namespace strings;
 DEFINE_int64(queue_wait_timeout_ms, 60 * 1000, "Maximum amount of time (in "
     "milliseconds) that a request will wait to be admitted before timing out.");
 
+// The stale topic warning threshold is made configurable to allow suppressing the
+// warning if it turns out to be noisy on some deployments, or lowering the
+// threshold to help debug admission control issues. Hidden so that we have the
+// option of making this a no-op later.
+DEFINE_int64_hidden(admission_control_stale_topic_threshold_ms, 5 * 1000,
+    "Threshold above which the admission controller will append warnings to "
+    "error messages and profiles to indicate that the admission control topic is "
+    "stale, i.e. that the admission decision may have been based on stale state "
+    "data. The default, 5 seconds, is chosen to minimise false positives but "
+    "capture most cases where the Impala daemon is disconnected from the statestore "
+    "or topic updates are seriously delayed.");
+
 namespace impala {
 
 const int64_t AdmissionController::PoolStats::HISTOGRAM_NUM_OF_BINS = 128;
@@ -124,6 +136,10 @@ const string AdmissionController::PROFILE_INFO_KEY_LAST_QUEUED_REASON =
     "Latest admission queue reason";
 const string AdmissionController::PROFILE_INFO_KEY_ADMITTED_MEM =
     "Cluster Memory Admitted";
+const string AdmissionController::PROFILE_INFO_KEY_STALENESS_WARNING =
+    "Admission control state staleness";
+const string AdmissionController::PROFILE_TIME_SINCE_LAST_UPDATE_COUNTER_NAME =
+    "AdmissionControlTimeSinceLastUpdate";
 
 // Error status string details
 const string REASON_MEM_LIMIT_TOO_LOW_FOR_RESERVATION =
@@ -145,7 +161,8 @@ const string REASON_MIN_RESERVATION_OVER_POOL_MEM =
 const string REASON_DISABLED_MAX_MEM_RESOURCES =
     "disabled by pool max mem resources set to 0";
 const string REASON_DISABLED_REQUESTS_LIMIT = "disabled by requests limit set to 0";
-const string REASON_QUEUE_FULL = "queue full, limit=$0, num_queued=$1";
+// $2 is the staleness detail.
+const string REASON_QUEUE_FULL = "queue full, limit=$0, num_queued=$1.$2";
 const string REASON_REQ_OVER_POOL_MEM =
     "request memory needed $0 is greater than pool max mem resources $1.\n\n"
     "Use the MEM_LIMIT query option to indicate how much memory is required per node. "
@@ -163,17 +180,19 @@ const string REASON_THREAD_RESERVATION_AGG_LIMIT_EXCEEDED =
     "THREAD_RESERVATION_AGGREGATE_LIMIT query option value: $1 > $2.";
 
 // Queue decision details
-// $0 = num running queries, $1 = num queries limit
-const string QUEUED_NUM_RUNNING = "number of running queries $0 is at or over limit $1";
-// $0 = queue size
+// $0 = num running queries, $1 = num queries limit, $2 = staleness detail
+const string QUEUED_NUM_RUNNING =
+    "number of running queries $0 is at or over limit $1.$2";
+// $0 = queue size, $1 = staleness detail
 const string QUEUED_QUEUE_NOT_EMPTY = "queue is not empty (size $0); queued queries are "
-    "executed first";
-// $0 = pool name, $1 = pool max memory, $2 = pool mem needed, $3 = pool mem available
+    "executed first.$1";
+// $0 = pool name, $1 = pool max memory, $2 = pool mem needed, $3 = pool mem available,
+// $4 = staleness detail
 const string POOL_MEM_NOT_AVAILABLE = "Not enough aggregate memory available in pool $0 "
-    "with max mem resources $1. Needed $2 but only $3 was available.";
-// $0 = host name, $1 = host mem needed, $3 = host mem available
+    "with max mem resources $1. Needed $2 but only $3 was available.$4";
+// $0 = host name, $1 = host mem needed, $2 = host mem available, $3 = host memory limit, $4 = staleness detail
 const string HOST_MEM_NOT_AVAILABLE = "Not enough memory available on host $0. "
-    "Needed $1 but only $2 out of $3 was available.";
+    "Needed $1 but only $2 out of $3 was available.$4";
 
 // Parses the pool name and backend_id from the topic key if it is valid.
 // Returns true if the topic key is valid and pool_name and backend_id are set.
@@ -265,10 +284,9 @@ Status AdmissionController::Init() {
   auto cb = [this](
       const StatestoreSubscriber::TopicDeltaMap& state,
       vector<TTopicDelta>* topic_updates) { UpdatePoolStats(state, topic_updates); };
-  Status status =
-      subscriber_->AddTopic(Statestore::IMPALA_REQUEST_QUEUE_TOPIC,
-          /* is_transient=*/ true, /* populate_min_subscriber_topic_version=*/ false,
-          /* filter_prefix=*/"", cb);
+  Status status = subscriber_->AddTopic(Statestore::IMPALA_REQUEST_QUEUE_TOPIC,
+      /* is_transient=*/true, /* populate_min_subscriber_topic_version=*/false,
+      /* filter_prefix=*/"", cb);
   if (!status.ok()) {
     status.AddDetail("AdmissionController failed to register request queue topic");
   }
@@ -397,7 +415,8 @@ bool AdmissionController::HasAvailableMemResources(const QuerySchedule& schedule
   if (stats->EffectiveMemReserved() + cluster_mem_to_admit > pool_max_mem) {
     *mem_unavailable_reason = Substitute(POOL_MEM_NOT_AVAILABLE, pool_name,
         PrintBytes(pool_max_mem), PrintBytes(cluster_mem_to_admit),
-        PrintBytes(max(pool_max_mem - stats->EffectiveMemReserved(), 0L)));
+        PrintBytes(max(pool_max_mem - stats->EffectiveMemReserved(), 0L)),
+        GetStalenessDetailLocked(" "));
     return false;
   }
 
@@ -418,7 +437,7 @@ bool AdmissionController::HasAvailableMemResources(const QuerySchedule& schedule
       *mem_unavailable_reason =
           Substitute(HOST_MEM_NOT_AVAILABLE, host_id, PrintBytes(per_host_mem_to_admit),
               PrintBytes(max(admit_mem_limit - effective_host_mem_reserved, 0L)),
-              PrintBytes(admit_mem_limit));
+              PrintBytes(admit_mem_limit), GetStalenessDetailLocked(" "));
       return false;
     }
   }
@@ -449,12 +468,12 @@ bool AdmissionController::CanAdmitRequest(const QuerySchedule& schedule,
   PoolStats* stats = GetPoolStats(pool_name);
   if (!admit_from_queue && stats->local_stats().num_queued > 0) {
     *not_admitted_reason = Substitute(QUEUED_QUEUE_NOT_EMPTY,
-        stats->local_stats().num_queued);
+        stats->local_stats().num_queued, GetStalenessDetailLocked(" "));
     return false;
   } else if (pool_cfg.max_requests >= 0 &&
       stats->agg_num_running() >= pool_cfg.max_requests) {
     *not_admitted_reason = Substitute(QUEUED_NUM_RUNNING, stats->agg_num_running(),
-        pool_cfg.max_requests);
+        pool_cfg.max_requests, GetStalenessDetailLocked(" "));
     return false;
   } else if (!HasAvailableMemResources(schedule, pool_cfg, not_admitted_reason)) {
     return false;
@@ -565,7 +584,7 @@ bool AdmissionController::RejectImmediately(const QuerySchedule& schedule,
   PoolStats* stats = GetPoolStats(schedule.request_pool());
   if (stats->agg_num_queued() >= pool_cfg.max_queued) {
     *rejection_reason = Substitute(REASON_QUEUE_FULL, pool_cfg.max_queued,
-        stats->agg_num_queued());
+        stats->agg_num_queued(), GetStalenessDetailLocked(" "));
     return true;
   }
 
@@ -763,6 +782,7 @@ void AdmissionController::UpdatePoolStats(
       HandleTopicUpdates(delta.topic_entries);
     }
     UpdateClusterAggregates();
+    last_topic_update_time_ms_ = MonotonicMillis();
   }
   dequeue_cv_.NotifyOne(); // Dequeue and admit queries on the dequeue thread
 }
@@ -968,7 +988,8 @@ void AdmissionController::DequeueLoop() {
         if (total_available <= 0) {
           if (!queue.empty()) {
             LogDequeueFailed(queue.head(),
-                Substitute(QUEUED_NUM_RUNNING, stats->agg_num_running(), max_requests));
+                Substitute(QUEUED_NUM_RUNNING, stats->agg_num_running(), max_requests,
+                    GetStalenessDetailLocked(" ")));
           }
           continue;
         }
@@ -1077,6 +1098,38 @@ void AdmissionController::AdmitQuery(QuerySchedule* schedule, bool was_queued) {
       PROFILE_INFO_KEY_ADMISSION_RESULT, admission_result);
   schedule->summary_profile()->AddInfoString(
       PROFILE_INFO_KEY_ADMITTED_MEM, PrintBytes(schedule->GetClusterMemoryToAdmit()));
+  // We may have admitted based on stale information. Include a warning in the
+  // profile if this may be the case.
+  int64_t time_since_update_ms;
+  string staleness_detail = GetStalenessDetailLocked("", &time_since_update_ms);
+  COUNTER_SET(ADD_COUNTER(schedule->summary_profile(),
+      PROFILE_TIME_SINCE_LAST_UPDATE_COUNTER_NAME, TUnit::TIME_MS), time_since_update_ms);
+  if (!staleness_detail.empty()) {
+    schedule->summary_profile()->AddInfoString(
+        PROFILE_INFO_KEY_STALENESS_WARNING, staleness_detail);
+  }
+
+}
+
+string AdmissionController::GetStalenessDetail(const string& prefix,
+    int64_t* ms_since_last_update) {
+  lock_guard<mutex> lock(admission_ctrl_lock_);
+  return GetStalenessDetailLocked(prefix, ms_since_last_update);
+}
+
+string AdmissionController::GetStalenessDetailLocked(const string& prefix,
+    int64_t* ms_since_last_update) {
+  int64_t ms_since_update = MonotonicMillis() - last_topic_update_time_ms_;
+  if (ms_since_last_update != nullptr) *ms_since_last_update = ms_since_update;
+  if (last_topic_update_time_ms_ == 0) {
+    return Substitute("$0Warning: admission control information from statestore "
+                      "is stale: no update has been received.", prefix);
+  } else if (ms_since_update >= FLAGS_admission_control_stale_topic_threshold_ms) {
+    return Substitute("$0Warning: admission control information from statestore "
+                      "is stale: $1 since last update was received.",
+        prefix, PrettyPrinter::Print(ms_since_update, TUnit::TIME_MS));
+  }
+  return "";
 }
 
 void AdmissionController::PoolToJsonLocked(const string& pool_name,
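
Each rejection and queuing template above gains a trailing placeholder for the
staleness detail. A hedged sketch of the pattern, with plain concatenation
standing in for strings::Substitute() (which the real code uses) and an
illustrative function name:

    #include <cstdint>
    #include <string>

    // Sketch: how a queuing reason picks up the staleness suffix. In the patch
    // this is Substitute(QUEUED_NUM_RUNNING, num_running, limit,
    // GetStalenessDetailLocked(" ")).
    std::string QueuedNumRunningMsg(int64_t num_running, int64_t limit,
                                    const std::string& staleness_detail) {
      // 'staleness_detail' is "" when the topic is fresh, so the message ends
      // cleanly at the period; when stale it carries a leading " " separator
      // (the prefix argument passed to GetStalenessDetailLocked()).
      return "number of running queries " + std::to_string(num_running) +
          " is at or over limit " + std::to_string(limit) + "." + staleness_detail;
    }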
diff --git a/be/src/scheduling/admission-controller.h b/be/src/scheduling/admission-controller.h
index c13d65f..14d59a9 100644
--- a/be/src/scheduling/admission-controller.h
+++ b/be/src/scheduling/admission-controller.h
@@ -226,6 +226,8 @@ class AdmissionController {
   static const string PROFILE_INFO_VAL_INITIAL_QUEUE_REASON;
   static const string PROFILE_INFO_KEY_LAST_QUEUED_REASON;
   static const string PROFILE_INFO_KEY_ADMITTED_MEM;
+  static const string PROFILE_INFO_KEY_STALENESS_WARNING;
+  static const string PROFILE_TIME_SINCE_LAST_UPDATE_COUNTER_NAME;
 
   AdmissionController(StatestoreSubscriber* subscriber,
       RequestPoolService* request_pool_service, MetricGroup* metrics,
@@ -277,6 +279,13 @@ class AdmissionController {
   void PopulatePerHostMemReservedAndAdmitted(
       std::unordered_map<std::string, std::pair<int64_t, int64_t>>* mem_map);
 
+  /// Returns a non-empty string with a warning if the admission control data is stale.
+  /// 'prefix' is added to the start of the string. Returns an empty string if not stale.
+  /// If 'ms_since_last_update' is non-null, set it to the time in ms since last update.
+  /// Caller must not hold 'admission_ctrl_lock_'.
+  std::string GetStalenessDetail(const std::string& prefix,
+      int64_t* ms_since_last_update = nullptr);
+
  private:
   class PoolStats;
   friend class PoolStats;
@@ -304,6 +313,10 @@ class AdmissionController {
   /// Protects all access to all variables below.
   boost::mutex admission_ctrl_lock_;
 
+  /// The last time a topic update was processed. Time is obtained from
+  /// MonotonicMillis(), or is 0 if an update was never received.
+  int64_t last_topic_update_time_ms_ = 0;
+
   /// Maps from host id to memory reserved and memory admitted, both aggregates over all
   /// pools. See the class doc for a detailed definition of reserved and admitted.
   /// Protected by admission_ctrl_lock_.
@@ -609,8 +622,12 @@ class AdmissionController {
   /// Is a helper method used by both PoolToJson() and AllPoolsToJson()
   void PoolToJsonLocked(const string& pool_name, rapidjson::Value* resource_pools,
       rapidjson::Document* document);
+
+  /// Same as GetStalenessDetail() except caller must hold 'admission_ctrl_lock_'.
+  std::string GetStalenessDetailLocked(const std::string& prefix,
+      int64_t* ms_since_last_update = nullptr);
 };
 
-}
+} // namespace impala
 
 #endif // SCHEDULING_ADMISSION_CONTROLLER_H
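
The header adds a public GetStalenessDetail() and a private
GetStalenessDetailLocked(), following the usual lock-wrapper convention: the
public method acquires 'admission_ctrl_lock_', while the *Locked variant
assumes the caller already holds it and can be composed into larger critical
sections. A minimal sketch of the idiom (class and member names here are
illustrative, not Impala's):

    #include <mutex>
    #include <string>

    class Controller {
     public:
      // Public entry point: takes the lock, then delegates.
      std::string GetDetail() {
        std::lock_guard<std::mutex> lock(lock_);
        return GetDetailLocked();
      }

     private:
      // Caller must hold 'lock_'.
      std::string GetDetailLocked() { return detail_; }

      std::mutex lock_;
      std::string detail_;
    };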
diff --git a/be/src/service/impala-http-handler.cc b/be/src/service/impala-http-handler.cc
index 4a55e66..b3145bf 100644
--- a/be/src/service/impala-http-handler.cc
+++ b/be/src/service/impala-http-handler.cc
@@ -880,15 +880,14 @@ void ImpalaHttpHandler::BackendsHandler(const Webserver::ArgumentMap& args,
 
 void ImpalaHttpHandler::AdmissionStateHandler(
     const Webserver::ArgumentMap& args, Document* document) {
+  AdmissionController* ac = ExecEnv::GetInstance()->admission_controller();
   Webserver::ArgumentMap::const_iterator pool_name_arg = args.find("pool_name");
   bool get_all_pools = (pool_name_arg == args.end());
   Value resource_pools(kArrayType);
-  if(get_all_pools){
-    ExecEnv::GetInstance()->admission_controller()->AllPoolsToJson(
-        &resource_pools, document);
+  if (get_all_pools) {
+    ac->AllPoolsToJson(&resource_pools, document);
   } else {
-    ExecEnv::GetInstance()->admission_controller()->PoolToJson(
-        pool_name_arg->second, &resource_pools, document);
+    ac->PoolToJson(pool_name_arg->second, &resource_pools, document);
   }
 
   // Now get running queries from CRS map.
@@ -938,6 +937,9 @@ void ImpalaHttpHandler::AdmissionStateHandler(
     resource_pools[i].GetObject().AddMember(
         "running_queries", queries_in_pool, document->GetAllocator());
   }
+  int64_t ms_since_last_statestore_update;
+  string staleness_detail = ac->GetStalenessDetail("", &ms_since_last_statestore_update);
+
   // In order to embed a plain json inside the webpage generated by mustache, we need
   // to stringify it and write it out as a json element.
   rapidjson::StringBuffer strbuf;
@@ -946,6 +948,14 @@ void ImpalaHttpHandler::AdmissionStateHandler(
   Value raw_json(strbuf.GetString(), document->GetAllocator());
   document->AddMember("resource_pools_plain_json", raw_json, document->GetAllocator());
   document->AddMember("resource_pools", resource_pools, document->GetAllocator());
+  document->AddMember("statestore_admission_control_time_since_last_update_ms",
+      ms_since_last_statestore_update, document->GetAllocator());
+  if (!staleness_detail.empty()) {
+    Value staleness_detail_json(staleness_detail.c_str(), document->GetAllocator());
+    document->AddMember("statestore_update_staleness_detail", staleness_detail_json,
+        document->GetAllocator());
+  }
+
   // Indicator that helps render UI elements based on this condition.
   document->AddMember("get_all_pools", get_all_pools, document->GetAllocator());
 }
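
The handler surfaces the staleness information as two JSON members: the
elapsed time is always present, while the detail string appears only when the
topic is actually stale, which is what lets the template render the warning
banner conditionally. A sketch of just that step, using rapidjson as the
handler does (the helper name is illustrative; member names match the patch):

    #include <cstdint>
    #include <string>
    #include "rapidjson/document.h"

    void AddStalenessFields(rapidjson::Document* document, int64_t ms_since_update,
        const std::string& staleness_detail) {
      rapidjson::Document::AllocatorType& alloc = document->GetAllocator();
      document->AddMember("statestore_admission_control_time_since_last_update_ms",
          ms_since_update, alloc);
      // Only emit the detail when stale, so the UI can key the warning banner
      // off the member's presence.
      if (!staleness_detail.empty()) {
        rapidjson::Value detail(staleness_detail.c_str(), alloc);
        document->AddMember("statestore_update_staleness_detail", detail, alloc);
      }
    }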
diff --git a/tests/common/impala_test_suite.py b/tests/common/impala_test_suite.py
index f7df218..06299e4 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -842,14 +842,23 @@ class ImpalaTestSuite(BaseTestSuite):
     """Waits for the given 'query_handle' to reach the 'expected_state'. If it does not
     reach the given state within 'timeout' seconds, the method raises a Timeout.
     """
+    self.wait_for_any_state(handle, [expected_state], timeout)
+
+  def wait_for_any_state(self, handle, expected_states, timeout):
+    """Waits for the given 'query_handle' to reach one of 'expected_states'. If it does
+    not reach one of the given states within 'timeout' seconds, the method raises a
+    Timeout. Returns the final state.
+    """
     start_time = time.time()
     actual_state = self.client.get_state(handle)
-    while actual_state != expected_state and time.time() - start_time < timeout:
+    while actual_state not in expected_states and time.time() - start_time < timeout:
       actual_state = self.client.get_state(handle)
       time.sleep(0.5)
-    if actual_state != expected_state:
-      raise Timeout("query '%s' did not reach expected state '%s', last known state '%s'"
-                    % (handle.get_handle().id, expected_state, actual_state))
+    if actual_state not in expected_states:
+      raise Timeout("query {0} did not reach one of the expected states {1}, "
+                    "last known state {2}".format(handle.get_handle().id, expected_states,
+                    actual_state))
+    return actual_state
 
   def assert_impalad_log_contains(self, level, line_regex, expected_count=1):
     """
diff --git a/tests/custom_cluster/test_admission_controller.py b/tests/custom_cluster/test_admission_controller.py
index c28b3ef..7c97c5e 100644
--- a/tests/custom_cluster/test_admission_controller.py
+++ b/tests/custom_cluster/test_admission_controller.py
@@ -147,6 +147,9 @@ QUERY_END_BEHAVIORS = ['EOS', 'CLIENT_CANCEL', 'QUERY_TIMEOUT', 'CLIENT_CLOSE']
 # The timeout used for the QUERY_TIMEOUT end behaviour
 QUERY_END_TIMEOUT_S = 1
 
+# Value used for --admission_control_stale_topic_threshold_ms in tests.
+STALE_TOPIC_THRESHOLD_MS = 500
+
 # Regex that matches the first part of the profile info string added when a query is
 # queued.
 INITIAL_QUEUE_REASON_REGEX = \
@@ -267,12 +270,19 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
     HS2TestSuite.check_response(get_profile_resp)
     self.__check_query_options(get_profile_resp.profile, expected_options)
 
-  def _execute_and_collect_profiles(self, queries, timeout_s, config_options={}):
+  def _execute_and_collect_profiles(self, queries, timeout_s, config_options={},
+      allow_query_failure=False):
     """Submit the query statements in 'queries' in parallel to the first impalad in
     the cluster. After submission, the results are fetched from the queries in
     sequence and their profiles are collected. Wait for up to timeout_s for
-    each query to finish. Returns the profile strings."""
+    each query to finish. If 'allow_query_failure' is True, a query may either
+    complete successfully or end up in the EXCEPTION state; otherwise all queries
+    are expected to complete successfully.
+    Returns the profile strings."""
     client = self.cluster.impalads[0].service.create_beeswax_client()
+    expected_states = [client.QUERY_STATES['FINISHED']]
+    if allow_query_failure:
+      expected_states.append(client.QUERY_STATES['EXCEPTION'])
     try:
       handles = []
       profiles = []
@@ -280,8 +290,9 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
       for query in queries:
         handles.append(client.execute_async(query))
       for query, handle in zip(queries, handles):
-        self.wait_for_state(handle, client.QUERY_STATES['FINISHED'], timeout_s)
-        self.client.fetch(query, handle)
+        state = self.wait_for_any_state(handle, expected_states, timeout_s)
+        if state == client.QUERY_STATES['FINISHED']:
+          self.client.fetch(query, handle)
         profiles.append(self.client.get_runtime_profile(handle))
       return profiles
     finally:
@@ -850,6 +861,66 @@ class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
     # Close the queued query.
     self.close(queued_query_resp.operationHandle)
 
+  @pytest.mark.execute_serially
+  @CustomClusterTestSuite.with_args(
+      impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=3,
+          pool_max_mem=1024 * 1024 * 1024) +
+      " --admission_control_stale_topic_threshold_ms={0}".format(
+          STALE_TOPIC_THRESHOLD_MS),
+      statestored_args=_STATESTORED_ARGS)
+  def test_statestore_outage(self):
+    """Test behaviour with a failed statestore. Queries should continue to be admitted
+    but we should generate diagnostics about the stale topic."""
+    self.cluster.statestored.kill()
+    impalad = self.cluster.impalads[0]
+    # Sleep until the last update is definitely considered stale.
+    sleep(STALE_TOPIC_THRESHOLD_MS / 1000. * 1.5)
+    ac_json = impalad.service.get_debug_webpage_json('/admission')
+    ms_since_update = ac_json["statestore_admission_control_time_since_last_update_ms"]
+    assert ms_since_update > STALE_TOPIC_THRESHOLD_MS
+    assert ("Warning: admission control information from statestore is stale:" in
+        ac_json["statestore_update_staleness_detail"])
+
+    # Submit a batch of queries. One should be admitted immediately, three should
+    # run after being queued, and the last rejected because the queue is full.
+    STMT = "select sleep(100)"
+    TIMEOUT_S = 60
+    NUM_QUERIES = 5
+    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
+        TIMEOUT_S, allow_query_failure=True)
+    ADMITTED_STALENESS_WARNING = \
+        "Warning: admission control information from statestore is stale"
+    ADMITTED_STALENESS_PROFILE_ENTRY = \
+        "Admission control state staleness: " + ADMITTED_STALENESS_WARNING
+
+    num_queued = 0
+    num_admitted_immediately = 0
+    num_rejected = 0
+    for profile in profiles:
+      if "Admission result: Admitted immediately" in profile:
+        assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
+        num_admitted_immediately += 1
+      elif "Admission result: Rejected" in profile:
+        num_rejected += 1
+        # Check that the rejection error returned to the client contains a warning.
+        query_statuses = [line for line in profile.split("\n")
+                          if "Query Status:" in line]
+        assert len(query_statuses) == 1, profile
+        assert ADMITTED_STALENESS_WARNING in query_statuses[0]
+      else:
+        assert "Admission result: Admitted (queued)" in profile, profile
+        assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
+
+        # Check that the queued reason contains a warning.
+        queued_reasons = [line for line in profile.split("\n")
+                          if "Initial admission queue reason:" in line]
+        assert len(queued_reasons) == 1, profile
+        assert ADMITTED_STALENESS_WARNING in queued_reasons[0]
+        num_queued += 1
+    assert num_admitted_immediately == 1
+    assert num_queued == 3
+    assert num_rejected == NUM_QUERIES - num_admitted_immediately - num_queued
+
 
 class TestAdmissionControllerStress(TestAdmissionControllerBase):
   """Submits a number of queries (parameterized) with some delay between submissions
diff --git a/www/admission_controller.tmpl b/www/admission_controller.tmpl
index edd2fff..cb60cde 100644
--- a/www/admission_controller.tmpl
+++ b/www/admission_controller.tmpl
@@ -85,9 +85,17 @@ Example of json received from the impala server
                 }
             ]
         }
-    ]
+    ],
+    "statestore_admission_control_time_since_last_update_ms": 745,
+    "statestore_update_staleness_detail": "Warning: admission control information from statestore is stale: 745ms since last update was received.",
+    "get_all_pools": true
 -->
 {{> www/common-header.tmpl }}
+{{?statestore_update_staleness_detail}}
+<div class="alert alert-danger" role="alert">
+<strong>{{statestore_update_staleness_detail}}</strong>
+</div>
+{{/statestore_update_staleness_detail}}
 <script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.3/Chart.min.js" type="text/javascript"></script>
 <script type="text/javascript">
 window.onload = function() {
@@ -204,6 +212,12 @@ function reset_method(pool_name) {
   <a href='/backends'>backends</a> debug page for memory admitted and reserved per
   backend.
 </p>
+<p class="lead">
+<strong>
+Time since last statestore update containing admission control topic state (ms):
+</strong>
+{{statestore_admission_control_time_since_last_update_ms}}
+</p>
 {{#resource_pools}}
 <div class="container-fluid">
   <h3><a href='/admission?pool_name={{pool_name}}'>{{pool_name}}</a></h3>