Posted to commits@phoenix.apache.org by sh...@apache.org on 2023/12/12 19:13:23 UTC

(phoenix) branch PHOENIX-6883-feature updated: PHOENIX-7111 : Metrics for server-side metadata cache (#1744)

This is an automated email from the ASF dual-hosted git repository.

shahrs87 pushed a commit to branch PHOENIX-6883-feature
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/PHOENIX-6883-feature by this push:
     new 6ae5e4dc64 PHOENIX-7111 : Metrics for server-side metadata cache (#1744)
6ae5e4dc64 is described below

commit 6ae5e4dc645808c56d40896cee14b8ca2972b909
Author: palash <pa...@gmail.com>
AuthorDate: Wed Dec 13 00:43:18 2023 +0530

    PHOENIX-7111 : Metrics for server-side metadata cache (#1744)
---
 .../apache/phoenix/cache/ServerMetadataCache.java  |   8 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  24 ++-
 .../coprocessor/PhoenixRegionServerEndpoint.java   |   6 +
 .../metrics/MetricsMetadataCachingSource.java      | 230 +++++++++++++++++++++
 .../metrics/MetricsMetadataCachingSourceImpl.java  | 122 +++++++++++
 .../MetricsPhoenixCoprocessorSourceFactory.java    |  12 ++
 .../org/apache/phoenix/execute/MutationState.java  |   3 +-
 .../phoenix/cache/ServerMetadataCacheTest.java     | 117 +++++++++++
 8 files changed, 517 insertions(+), 5 deletions(-)
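
For orientation, here is a minimal sketch (not part of the commit) of how the new metrics source is obtained and updated. The Phoenix class and method names are taken from the diff below; the wrapper class and the literal latency values are illustrative assumptions only.

    // Minimal sketch: exercising the new metadata-caching metrics source.
    // All Phoenix class/method names appear in this commit's diff; the
    // wrapper class and literal values here are illustrative only.
    import org.apache.phoenix.coprocessor.metrics.MetricsMetadataCachingSource;
    import org.apache.phoenix.coprocessor.metrics.MetricsPhoenixCoprocessorSourceFactory;

    public class MetadataCachingMetricsSketch {
        public static void main(String[] args) {
            // The factory hands out a process-wide singleton source.
            MetricsMetadataCachingSource metricsSource =
                    MetricsPhoenixCoprocessorSourceFactory.getInstance().getMetadataCachingSource();

            // Counters are bumped on validation requests, cache hits/misses,
            // and invalidation outcomes.
            metricsSource.incrementValidateTimestampRequestCount();
            metricsSource.incrementRegionServerMetadataCacheHitCount();
            metricsSource.incrementRegionServerMetadataCacheMissCount();
            metricsSource.incrementMetadataCacheInvalidationOperationsCount();
            metricsSource.incrementMetadataCacheInvalidationSuccessCount();

            // Invalidation latencies feed two histograms (values in milliseconds).
            metricsSource.addMetadataCacheInvalidationRpcTime(5L);
            metricsSource.addMetadataCacheInvalidationTotalTime(12L);

            // Tests can snapshot all current values at once.
            MetricsMetadataCachingSource.MetadataCachingMetricValues values =
                    metricsSource.getCurrentMetricValues();
            System.out.println("cache hits so far: " + values.getCacheHitCount());
        }
    }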

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java
index 15ce11e145..33a90af994 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerMetadataCache.java
@@ -24,6 +24,8 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.metrics.MetricsMetadataCachingSource;
+import org.apache.phoenix.coprocessor.metrics.MetricsPhoenixCoprocessorSourceFactory;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -57,6 +59,7 @@ public class ServerMetadataCache {
     // key is the combination of <tenantID, schema name, table name>, value is the lastDDLTimestamp
     private final Cache<ImmutableBytesPtr, Long> lastDDLTimestampMap;
     private Connection connectionForTesting;
+    private MetricsMetadataCachingSource metricsSource;
 
     /**
      * Creates/gets an instance of ServerMetadataCache.
@@ -78,6 +81,8 @@ public class ServerMetadataCache {
 
     private ServerMetadataCache(Configuration conf) {
         this.conf = conf;
+        this.metricsSource = MetricsPhoenixCoprocessorSourceFactory
+                                .getInstance().getMetadataCachingSource();
         long maxTTL = conf.getLong(PHOENIX_COPROC_REGIONSERVER_CACHE_TTL_MS,
                 DEFAULT_PHOENIX_COPROC_REGIONSERVER_CACHE_TTL_MS);
         long maxSize = conf.getLong(PHOENIX_COPROC_REGIONSERVER_CACHE_SIZE,
@@ -111,11 +116,12 @@ public class ServerMetadataCache {
         // Lookup in cache if present.
         Long lastDDLTimestamp = lastDDLTimestampMap.getIfPresent(tableKeyPtr);
         if (lastDDLTimestamp != null) {
+            metricsSource.incrementRegionServerMetadataCacheHitCount();
             LOGGER.trace("Retrieving last ddl timestamp value from cache for tableName: {}",
                     fullTableNameStr);
             return lastDDLTimestamp;
         }
-
+        metricsSource.incrementRegionServerMetadataCacheMissCount();
         PTable table;
         String tenantIDStr = Bytes.toString(tenantID);
         if (tenantIDStr == null || tenantIDStr.isEmpty()) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 3beb426da0..d905f6ad5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -189,6 +189,8 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRespons
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
 import org.apache.phoenix.coprocessor.generated.RegionServerEndpointProtos;
+import org.apache.phoenix.coprocessor.metrics.MetricsMetadataCachingSource;
+import org.apache.phoenix.coprocessor.metrics.MetricsPhoenixCoprocessorSourceFactory;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.expression.Expression;
@@ -609,6 +611,7 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
     private boolean allowSplittableSystemCatalogRollback;
 
     private MetricsMetadataSource metricsSource;
+    private MetricsMetadataCachingSource metricsMetadataCachingSource;
     private long metadataCacheInvalidationTimeoutMs;
     public static void setFailConcurrentMutateAddColumnOneTimeForTesting(boolean fail) {
         failConcurrentMutateAddColumnOneTimeForTesting = fail;
@@ -653,6 +656,8 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
         Tracing.addTraceMetricsSource();
         Metrics.ensureConfigured();
         metricsSource = MetricsMetadataSourceFactory.getMetadataMetricsSource();
+        metricsMetadataCachingSource
+                = MetricsPhoenixCoprocessorSourceFactory.getInstance().getMetadataCachingSource();
     }
 
     @Override
@@ -3493,6 +3498,7 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
                             + " is not loaded");
             return;
         }
+        metricsMetadataCachingSource.incrementMetadataCacheInvalidationOperationsCount();
         Properties properties = new Properties();
         // Skip checking of system table existence since the system tables should have been
         // created by now.
@@ -3503,7 +3509,17 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
             // This will incur an extra RPC to the master. This RPC is required since we want to
             // get current list of regionservers.
             Collection<ServerName> serverNames = admin.getRegionServers(true);
-            invalidateServerMetadataCacheWithRetries(admin, serverNames, requests, false);
+            PhoenixStopWatch stopWatch = new PhoenixStopWatch().start();
+            try {
+                invalidateServerMetadataCacheWithRetries(admin, serverNames, requests, false);
+                metricsMetadataCachingSource.incrementMetadataCacheInvalidationSuccessCount();
+            } catch (Throwable t) {
+                metricsMetadataCachingSource.incrementMetadataCacheInvalidationFailureCount();
+                throw t;
+            } finally {
+                metricsMetadataCachingSource
+                        .addMetadataCacheInvalidationTotalTime(stopWatch.stop().elapsedMillis());
+            }
         }
     }
 
@@ -3550,10 +3566,12 @@ TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
                     // hbase.rpc.timeout. Even if the future times out, this runnable can be in
                     // RUNNING state and will not be interrupted.
                     service.invalidateServerMetadataCache(controller, protoRequest);
+                    long cacheInvalidationTime = innerWatch.stop().elapsedMillis();
                     LOGGER.info("Invalidating metadata cache"
                             + " on region server: {} completed successfully and it took {} ms",
-                            serverName, innerWatch.stop().elapsedMillis());
-                    // TODO Create a histogram metric for time taken for invalidating the cache.
+                            serverName, cacheInvalidationTime);
+                    metricsMetadataCachingSource
+                            .addMetadataCacheInvalidationRpcTime(cacheInvalidationTime);
                 } catch (ServiceException se) {
                     LOGGER.error("Invalidating metadata cache failed for regionserver {}",
                             serverName, se);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java
index a114bea095..fe7b801ece 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixRegionServerEndpoint.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.cache.ServerMetadataCache;
 import org.apache.phoenix.coprocessor.generated.RegionServerEndpointProtos;
+import org.apache.phoenix.coprocessor.metrics.MetricsMetadataCachingSource;
+import org.apache.phoenix.coprocessor.metrics.MetricsPhoenixCoprocessorSourceFactory;
 import org.apache.phoenix.protobuf.ProtobufUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
@@ -43,17 +45,21 @@ public class PhoenixRegionServerEndpoint
         extends RegionServerEndpointProtos.RegionServerEndpointService
         implements RegionServerCoprocessor {
     private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRegionServerEndpoint.class);
+    private MetricsMetadataCachingSource metricsSource;
     protected Configuration conf;
 
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
         this.conf = env.getConfiguration();
+        this.metricsSource = MetricsPhoenixCoprocessorSourceFactory
+                                .getInstance().getMetadataCachingSource();
     }
 
     @Override
     public void validateLastDDLTimestamp(RpcController controller,
             RegionServerEndpointProtos.ValidateLastDDLTimestampRequest request,
             RpcCallback<RegionServerEndpointProtos.ValidateLastDDLTimestampResponse> done) {
+        metricsSource.incrementValidateTimestampRequestCount();
         for (RegionServerEndpointProtos.LastDDLTimestampRequest lastDDLTimestampRequest
                 : request.getLastDDLTimestampRequestsList()) {
             byte[] tenantID = lastDDLTimestampRequest.getTenantId().toByteArray();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsMetadataCachingSource.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsMetadataCachingSource.java
new file mode 100644
index 0000000000..0fe2dd7ac5
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsMetadataCachingSource.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor.metrics;
+
+import org.apache.hadoop.hbase.metrics.BaseSource;
+import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Interface for metrics about Distributed Metadata Caching
+ */
+public interface MetricsMetadataCachingSource extends BaseSource {
+    // Metrics2 and JMX constants
+    String METRICS_NAME = "MetadataCaching";
+    String METRICS_CONTEXT = "phoenix";
+    String METRICS_DESCRIPTION = "Metrics about Distributed Metadata Caching";
+    String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+    String REGIONSERVER_METADATA_CACHE_HITS = "numRegionServerMetadataCacheHits";
+    String REGIONSERVER_METADATA_CACHE_HITS_DESC
+            = "Number of cache hits in PhoenixRegionServerEndpoint "
+                + "when serving validate ddl timestamp requests.";
+
+    String REGIONSERVER_METADATA_CACHE_MISSES = "numRegionServerMetadataCacheMisses";
+    String REGIONSERVER_METADATA_CACHE_MISSES_DESC
+            = "Number of cache misses in PhoenixRegionServerEndpoint "
+                + "when serving validate ddl timestamp requests.";
+
+    String VALIDATE_LAST_DDL_TIMESTAMP_REQUESTS = "numValidateLastDDLTimestampRequests";
+    String VALIDATE_LAST_DDL_TIMESTAMP_REQUEST_DESC
+            = "Number of validate last ddl timestamp requests.";
+
+    String METADATA_CACHE_INVALIDATION_OPERATIONS = "numMetadataCacheInvalidationOps";
+    String METADATA_CACHE_INVALIDATION_OPERATIONS_DESC = "Number of times we invoke "
+                                                    + "cache invalidation within a DDL operation";
+
+    String METADATA_CACHE_INVALIDATION_SUCCESS = "numMetadataCacheInvalidationOpsSuccess";
+    String METADATA_CACHE_INVALIDATION_SUCCESS_DESC
+            = "Number of times cache invalidation was successful.";
+
+    String METADATA_CACHE_INVALIDATION_FAILURE = "numMetadataCacheInvalidationOpsFailure";
+    String METADATA_CACHE_INVALIDATION_FAILURE_DESC = "Number of times cache invalidation failed.";
+
+    String METADATA_CACHE_INVALIDATION_RPC_TIME = "metadataCacheInvalidationRpcTimeMs";
+    String METADATA_CACHE_INVALIDATION_RPC_TIME_DESC = "Histogram for the time in milliseconds for"
+                                                + " cache invalidation RPC";
+    String METADATA_CACHE_INVALIDATION_TOTAL_TIME = "metadataCacheInvalidationTotalTimeMs";
+    String METADATA_CACHE_INVALIDATION_TOTAL_TIME_DESC
+            = "Histogram for the total time in milliseconds "
+                + "for cache invalidation on all regionservers";
+
+    /**
+     * Report the number of cache hits when validating last ddl timestamps.
+     */
+    void incrementRegionServerMetadataCacheHitCount();
+
+    /**
+     * Report the number of cache misses when validating last ddl timestamps.
+     */
+    void incrementRegionServerMetadataCacheMissCount();
+
+    /**
+     * Report the number of requests for validating last ddl timestamps.
+     */
+    void incrementValidateTimestampRequestCount();
+
+    /**
+     * Report number of cache invalidations performed.
+     */
+    void incrementMetadataCacheInvalidationOperationsCount();
+
+    /**
+     * Report number of cache invalidations which were successful.
+     */
+    void incrementMetadataCacheInvalidationSuccessCount();
+
+    /**
+     * Report number of cache invalidations which failed.
+     */
+    void incrementMetadataCacheInvalidationFailureCount();
+
+    /**
+     * Add to the cache invalidation rpc time histogram.
+     */
+    void addMetadataCacheInvalidationRpcTime(long t);
+
+    /**
+     * Add to the cache invalidation total time histogram.
+     * @param t time taken in milliseconds
+     */
+    void addMetadataCacheInvalidationTotalTime(long t);
+
+    /**
+     * Return current values of all metrics.
+     * @return {@link MetadataCachingMetricValues} object
+     */
+    @VisibleForTesting
+    MetadataCachingMetricValues getCurrentMetricValues();
+
+    /**
+     * Class to represent values of all metrics related to server metadata caching.
+     */
+    @VisibleForTesting
+    class MetadataCachingMetricValues {
+        private long cacheHitCount;
+        private long cacheMissCount;
+        private long validateDDLTimestampRequestsCount;
+        private long cacheInvalidationOpsCount;
+        private long cacheInvalidationSuccessCount;
+        private long cacheInvalidationFailureCount;
+        private long cacheInvalidationRpcTimeCount;
+        private long cacheInvalidationTotalTimeCount;
+
+        MetadataCachingMetricValues(Builder builder) {
+            this.cacheHitCount = builder.cacheHitCount;
+            this.cacheMissCount = builder.cacheMissCount;
+            this.validateDDLTimestampRequestsCount = builder.validateDDLTimestampRequestsCount;
+            this.cacheInvalidationOpsCount = builder.cacheInvalidationOpsCount;
+            this.cacheInvalidationSuccessCount = builder.cacheInvalidationSuccessCount;
+            this.cacheInvalidationFailureCount = builder.cacheInvalidationFailureCount;
+            this.cacheInvalidationRpcTimeCount = builder.cacheInvalidationRpcTimeCount;
+            this.cacheInvalidationTotalTimeCount = builder.cacheInvalidationTotalTimeCount;
+        }
+
+        public long getCacheHitCount() {
+            return cacheHitCount;
+        }
+
+        public long getCacheMissCount() {
+            return cacheMissCount;
+        }
+
+        public long getValidateDDLTimestampRequestsCount() {
+            return validateDDLTimestampRequestsCount;
+        }
+
+        public long getCacheInvalidationOpsCount() {
+            return cacheInvalidationOpsCount;
+        }
+
+        public long getCacheInvalidationSuccessCount() {
+            return cacheInvalidationSuccessCount;
+        }
+
+        public long getCacheInvalidationFailureCount() {
+            return cacheInvalidationFailureCount;
+        }
+
+        public long getCacheInvalidationRpcTimeCount() {
+            return cacheInvalidationRpcTimeCount;
+        }
+
+        public long getCacheInvalidationTotalTimeCount() {
+            return cacheInvalidationTotalTimeCount;
+        }
+
+        /**
+         * Builder for {@link MetadataCachingMetricValues}
+         */
+        public static class Builder {
+            private long cacheHitCount;
+            private long cacheMissCount;
+            private long validateDDLTimestampRequestsCount;
+            private long cacheInvalidationOpsCount;
+            private long cacheInvalidationSuccessCount;
+            private long cacheInvalidationFailureCount;
+            private long cacheInvalidationRpcTimeCount;
+            private long cacheInvalidationTotalTimeCount;
+
+            public MetadataCachingMetricValues build() {
+                return new MetadataCachingMetricValues(this);
+            }
+
+            public Builder setCacheHitCount(long c) {
+                this.cacheHitCount = c;
+                return this;
+            }
+            public Builder setCacheMissCount(long cacheMissCount) {
+                this.cacheMissCount = cacheMissCount;
+                return this;
+            }
+
+            public Builder setValidateDDLTimestampRequestsCount(
+                    long validateDDLTimestampRequestsCount) {
+                this.validateDDLTimestampRequestsCount = validateDDLTimestampRequestsCount;
+                return this;
+            }
+
+            public Builder setCacheInvalidationOpsCount(long cacheInvalidationOpsCount) {
+                this.cacheInvalidationOpsCount = cacheInvalidationOpsCount;
+                return this;
+            }
+
+            public Builder setCacheInvalidationSuccessCount(long cacheInvalidationSuccessCount) {
+                this.cacheInvalidationSuccessCount = cacheInvalidationSuccessCount;
+                return this;
+            }
+
+            public Builder setCacheInvalidationFailureCount(long cacheInvalidationFailureCount) {
+                this.cacheInvalidationFailureCount = cacheInvalidationFailureCount;
+                return this;
+            }
+
+            public Builder setCacheInvalidationRpcTimeCount(long cacheInvalidationRpcTimeCount) {
+                this.cacheInvalidationRpcTimeCount = cacheInvalidationRpcTimeCount;
+                return this;
+            }
+
+            public Builder setCacheInvalidationTotalTimeCount(
+                    long cacheInvalidationTotalTimeCount) {
+                this.cacheInvalidationTotalTimeCount = cacheInvalidationTotalTimeCount;
+                return this;
+            }
+        }
+    }
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsMetadataCachingSourceImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsMetadataCachingSourceImpl.java
new file mode 100644
index 0000000000..67098629fd
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsMetadataCachingSourceImpl.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor.metrics;
+
+import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
+import org.apache.hadoop.metrics2.MetricHistogram;
+import org.apache.hadoop.metrics2.lib.MutableFastCounter;
+
+/**
+ * Implementation for tracking Distributed Metadata Caching metrics.
+ */
+public class MetricsMetadataCachingSourceImpl
+        extends BaseSourceImpl
+        implements MetricsMetadataCachingSource {
+
+    private final MutableFastCounter cacheHitCounter;
+    private final MutableFastCounter cacheMissCounter;
+    private final MutableFastCounter validateDDLTimestampRequestCounter;
+    private final MutableFastCounter cacheInvalidationOpsCounter;
+    private final MutableFastCounter cacheInvalidationSuccessCounter;
+    private final MutableFastCounter cacheInvalidationFailureCounter;
+    private final MetricHistogram cacheInvalidationRpcTimeHistogram;
+    private final MetricHistogram cacheInvalidationTotalTimeHistogram;
+
+    public MetricsMetadataCachingSourceImpl() {
+        this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
+    }
+
+    public MetricsMetadataCachingSourceImpl(String metricsName,
+                                       String metricsDescription,
+                                       String metricsContext,
+                                       String metricsJmxContext) {
+        super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
+        cacheHitCounter = getMetricsRegistry().newCounter(
+                REGIONSERVER_METADATA_CACHE_HITS, REGIONSERVER_METADATA_CACHE_HITS_DESC, 0L);
+        cacheMissCounter = getMetricsRegistry().newCounter(
+            REGIONSERVER_METADATA_CACHE_MISSES, REGIONSERVER_METADATA_CACHE_MISSES_DESC, 0L);
+        validateDDLTimestampRequestCounter = getMetricsRegistry().newCounter(
+            VALIDATE_LAST_DDL_TIMESTAMP_REQUESTS, VALIDATE_LAST_DDL_TIMESTAMP_REQUEST_DESC, 0L);
+        cacheInvalidationOpsCounter = getMetricsRegistry().newCounter(
+            METADATA_CACHE_INVALIDATION_OPERATIONS,
+                METADATA_CACHE_INVALIDATION_OPERATIONS_DESC, 0L);
+        cacheInvalidationSuccessCounter = getMetricsRegistry().newCounter(
+            METADATA_CACHE_INVALIDATION_SUCCESS, METADATA_CACHE_INVALIDATION_SUCCESS_DESC, 0L);
+        cacheInvalidationFailureCounter = getMetricsRegistry().newCounter(
+            METADATA_CACHE_INVALIDATION_FAILURE, METADATA_CACHE_INVALIDATION_FAILURE_DESC, 0L);
+        cacheInvalidationRpcTimeHistogram = getMetricsRegistry().newHistogram(
+            METADATA_CACHE_INVALIDATION_RPC_TIME, METADATA_CACHE_INVALIDATION_RPC_TIME_DESC);
+        cacheInvalidationTotalTimeHistogram = getMetricsRegistry().newHistogram(
+            METADATA_CACHE_INVALIDATION_TOTAL_TIME, METADATA_CACHE_INVALIDATION_TOTAL_TIME_DESC);
+    }
+
+    @Override
+    public void incrementRegionServerMetadataCacheHitCount() {
+        cacheHitCounter.incr();
+    }
+
+    @Override
+    public void incrementRegionServerMetadataCacheMissCount() {
+        cacheMissCounter.incr();
+    }
+
+    @Override
+    public void incrementValidateTimestampRequestCount() {
+        validateDDLTimestampRequestCounter.incr();
+    }
+
+    @Override
+    public void addMetadataCacheInvalidationRpcTime(long t) {
+        cacheInvalidationRpcTimeHistogram.add(t);
+    }
+
+    @Override
+    public void addMetadataCacheInvalidationTotalTime(long t) {
+        cacheInvalidationTotalTimeHistogram.add(t);
+    }
+
+    @Override
+    public void incrementMetadataCacheInvalidationOperationsCount() {
+        cacheInvalidationOpsCounter.incr();
+    }
+
+    @Override
+    public void incrementMetadataCacheInvalidationSuccessCount() {
+        cacheInvalidationSuccessCounter.incr();
+    }
+
+    @Override
+    public void incrementMetadataCacheInvalidationFailureCount() {
+        cacheInvalidationFailureCounter.incr();
+    }
+
+    @Override
+    public MetadataCachingMetricValues getCurrentMetricValues() {
+        return new MetadataCachingMetricValues
+                .Builder()
+                .setCacheHitCount(cacheHitCounter.value())
+                .setCacheMissCount(cacheMissCounter.value())
+                .setValidateDDLTimestampRequestsCount(validateDDLTimestampRequestCounter.value())
+                .setCacheInvalidationRpcTimeCount(cacheInvalidationRpcTimeHistogram.getCount())
+                .setCacheInvalidationTotalTimeCount(cacheInvalidationTotalTimeHistogram.getCount())
+                .setCacheInvalidationOpsCount(cacheInvalidationOpsCounter.value())
+                .setCacheInvalidationSuccessCount(cacheInvalidationSuccessCounter.value())
+                .setCacheInvalidationFailureCount(cacheInvalidationFailureCounter.value())
+                .build();
+    }
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsPhoenixCoprocessorSourceFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsPhoenixCoprocessorSourceFactory.java
index 47ee949e12..ff7f501a7a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsPhoenixCoprocessorSourceFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/metrics/MetricsPhoenixCoprocessorSourceFactory.java
@@ -26,6 +26,7 @@ public class MetricsPhoenixCoprocessorSourceFactory {
             INSTANCE = new MetricsPhoenixCoprocessorSourceFactory();
     // Holds the PHOENIX_TTL related metrics.
     private static volatile MetricsPhoenixTTLSource phoenixTTLSource;
+    private static volatile MetricsMetadataCachingSource metadataCachingSource;
 
     public static MetricsPhoenixCoprocessorSourceFactory getInstance() {
         return INSTANCE;
@@ -42,4 +43,15 @@ public class MetricsPhoenixCoprocessorSourceFactory {
         }
         return INSTANCE.phoenixTTLSource;
     }
+
+    public MetricsMetadataCachingSource getMetadataCachingSource() {
+        if (INSTANCE.metadataCachingSource == null) {
+            synchronized (MetricsMetadataCachingSource.class) {
+                if (INSTANCE.metadataCachingSource == null) {
+                    INSTANCE.metadataCachingSource = new MetricsMetadataCachingSourceImpl();
+                }
+            }
+        }
+        return INSTANCE.metadataCachingSource;
+    }
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 1612fe36a0..14f751988c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -1215,7 +1215,8 @@ public class MutationState implements SQLCloseable {
 
         //if enabled, validate last ddl timestamps for all tables in the mutationsMap
         //for now, force update client cache for all tables if StaleMetadataCacheException is seen
-        if (this.validateLastDdlTimestamp) {
+        // mutationsMap can be empty, e.g. during a DDL operation
+        if (this.validateLastDdlTimestamp && !this.mutationsMap.isEmpty()) {
             List<TableRef> tableRefs = new ArrayList<>(this.mutationsMap.keySet());
             try {
                 ValidateLastDDLTimestampUtil.validateLastDDLTimestamp(
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerMetadataCacheTest.java b/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerMetadataCacheTest.java
index 6ae19ad144..a210e40579 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerMetadataCacheTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/cache/ServerMetadataCacheTest.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.cache;
 
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.metrics.MetricsMetadataCachingSource;
+import org.apache.phoenix.coprocessor.metrics.MetricsPhoenixCoprocessorSourceFactory;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -78,6 +80,10 @@ public class ServerMetadataCacheTest extends ParallelStatsDisabledIT {
     public static synchronized void doSetup() throws Exception {
         Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
         props.put(QueryServices.LAST_DDL_TIMESTAMP_VALIDATION_ENABLED, Boolean.toString(true));
+        props.put(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
+                Long.toString(Long.MAX_VALUE));
+        props.put(QueryServices.TASK_HANDLING_INITIAL_DELAY_MS_ATTRIB,
+                Long.toString(Long.MAX_VALUE));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
 
@@ -1303,6 +1309,117 @@ public class ServerMetadataCacheTest extends ParallelStatsDisabledIT {
         }
     }
 
+    /**
+     * Test server side metrics are populated correctly.
+     * Client-1 creates a table and creates an index on it.
+     * Client-2 queries the table.
+     */
+    @Test
+    public void testServerSideMetrics() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String url1 = QueryUtil.getConnectionUrl(props, config, "client1");
+        String url2 = QueryUtil.getConnectionUrl(props, config, "client2");
+        String tableName = generateUniqueName();
+        String indexName = generateUniqueName();
+        ConnectionQueryServices cqs1 = driver.getConnectionQueryServices(url1, props);
+        ConnectionQueryServices cqs2 = driver.getConnectionQueryServices(url2, props);
+        MetricsMetadataCachingSource metricsSource
+                = MetricsPhoenixCoprocessorSourceFactory.getInstance().getMetadataCachingSource();
+
+        //take a snapshot of current metric values
+        MetricsMetadataCachingSource.MetadataCachingMetricValues oldMetricValues
+                = metricsSource.getCurrentMetricValues();
+
+        long cacheHit = 0;
+        long cacheMiss = 0;
+        long validateDDLRequestCount = 0;
+        long cacheInvOpsCount = 0;
+        long cacheInvSuccessCount = 0;
+        long cacheInvFailureCount = 0;
+        long cacheInvRpcTimeCount = 0;
+        long cacheInvTotalTimeCount = 0;
+
+        try (Connection conn1 = cqs1.connect(url1, props);
+             Connection conn2 = cqs2.connect(url2, props)) {
+
+            // no metric changes
+            createTable(conn1, tableName, NEVER);
+
+            // client validates table, regionserver does not find table in its cache
+            query(conn2, tableName);
+            validateDDLRequestCount++;
+            cacheMiss++;
+
+            // last_ddl_timestamp is bumped for the table
+            // cache invalidation operation succeeds for table
+            // cache invalidation operation succeeds for index state change
+            // only one region server in tests for cache invalidation RPC
+            createIndex(conn1, tableName, indexName, "v1");
+            cacheInvOpsCount += 2;
+            cacheInvRpcTimeCount += 2;
+            cacheInvTotalTimeCount += 2;
+            cacheInvSuccessCount += 2;
+
+            // client validates only table since it does not know about the index yet
+            // regionserver does not find table in its cache
+            query(conn2, tableName);
+            validateDDLRequestCount++;
+            cacheMiss++;
+
+            // client validates both index and table this time
+            // regionserver finds table but does not find index in its cache
+            query(conn2, tableName);
+            validateDDLRequestCount++;
+            cacheHit++; //table
+            cacheMiss++; //index
+
+            // client validates index and table again
+            // regionserver finds both index and table in its cache
+            query(conn2, tableName);
+            validateDDLRequestCount++;
+            cacheHit += 2;
+
+            MetricsMetadataCachingSource.MetadataCachingMetricValues newMetricValues
+                    = metricsSource.getCurrentMetricValues();
+
+            assertEquals("Incorrect number of cache hits on region server.", cacheHit,
+                    newMetricValues.getCacheHitCount() - oldMetricValues.getCacheHitCount());
+
+            assertEquals("Incorrect number of cache misses on region server.", cacheMiss,
+                newMetricValues.getCacheMissCount() - oldMetricValues.getCacheMissCount());
+
+            assertEquals("Incorrect number of validate ddl timestamp requests.",
+                    validateDDLRequestCount,
+                    newMetricValues.getValidateDDLTimestampRequestsCount()
+                            - oldMetricValues.getValidateDDLTimestampRequestsCount());
+
+            assertEquals("Incorrect number of cache invalidation ops count.",
+                    cacheInvOpsCount,
+                    newMetricValues.getCacheInvalidationOpsCount()
+                            - oldMetricValues.getCacheInvalidationOpsCount());
+
+            assertEquals("Incorrect number of successful cache invalidation ops count.",
+                    cacheInvSuccessCount,
+                    newMetricValues.getCacheInvalidationSuccessCount()
+                            - oldMetricValues.getCacheInvalidationSuccessCount());
+
+            assertEquals("Incorrect number of failed cache invalidation ops count.",
+                    cacheInvFailureCount,
+                    newMetricValues.getCacheInvalidationFailureCount()
+                            - oldMetricValues.getCacheInvalidationFailureCount());
+
+            assertEquals("Incorrect number of cache invalidation RPC times.",
+                    cacheInvRpcTimeCount,
+                    newMetricValues.getCacheInvalidationRpcTimeCount()
+                            - oldMetricValues.getCacheInvalidationRpcTimeCount());
+
+            assertEquals("Incorrect number of cache invalidation total times.",
+                    cacheInvTotalTimeCount,
+                    newMetricValues.getCacheInvalidationTotalTimeCount()
+                            - oldMetricValues.getCacheInvalidationTotalTimeCount());
+        }
+    }
+
     //Helper methods
 
     private long getLastDDLTimestamp(String tableName) throws SQLException {