Posted to commits@cassandra.apache.org by cl...@apache.org on 2019/08/12 15:50:53 UTC

[cassandra] branch trunk updated: Improve readability of Table metrics Virtual tables units

This is an automated email from the ASF dual-hosted git repository.

clohfink pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 9a175a1  Improve readability of Table metrics Virtual tables units
9a175a1 is described below

commit 9a175a1697b1107fb63480fb86ffe37b02122267
Author: Chris Lohfink <cl...@gmail.com>
AuthorDate: Thu Aug 8 12:43:18 2019 -0700

    Improve readability of Table metrics Virtual tables units
    
    Patch by Chris Lohfink; reviewed by Jon Haddad and Benedict Elliott Smith for CASSANDRA-15194
---
 CHANGES.txt                                        |   1 +
 .../cassandra/db/virtual/AbstractVirtualTable.java |   2 +-
 .../apache/cassandra/db/virtual/SimpleDataSet.java |   7 +
 .../cassandra/db/virtual/TableMetricTables.java    | 246 ++++++++++++++-------
 4 files changed, 180 insertions(+), 76 deletions(-)

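The patch below reports latencies in milliseconds instead of raw nanoseconds and disk sizes in mebibytes
instead of bytes. A minimal standalone sketch of those conversions (not part of the patch; the class name
and sample values are made up, while the constants and the commons-math Precision call mirror the ones the
patch introduces in TableMetricTables):

    import java.math.BigDecimal;

    import org.apache.commons.math3.util.Precision;

    public class UnitConversionSketch
    {
        // same constants the patch adds to TableMetricTables
        private static final double BYTES_TO_MIB = 1.0 / (1024 * 1024);
        private static final double NS_TO_MS = 0.000001;

        public static void main(String[] args)
        {
            double p99Nanos = 1234567.0;                 // hypothetical 99th percentile latency in ns
            long partitionBytes = 5 * 1024 * 1024 + 1;   // hypothetical max partition size in bytes

            // latency: ns -> ms, rounded half-up to three decimal places (prints 1.235)
            System.out.println(Precision.round(p99Nanos * NS_TO_MS, 3, BigDecimal.ROUND_HALF_UP));

            // disk usage: bytes -> MiB, always rounded up to the next whole MiB (prints 6)
            System.out.println((long) Math.ceil(partitionBytes * BYTES_TO_MIB));
        }
    }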
diff --git a/CHANGES.txt b/CHANGES.txt
index fb246ff..389569b 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 4.0
+ * Improve readability of Table metrics Virtual tables units (CASSANDRA-15194)
  * Fix error with non-existent table for nodetool tablehistograms (CASSANDRA-14410)
  * Catch non-IOException in FileUtils.close to make sure that all resources are closed (CASSANDRA-15225)
  * Align load column in nodetool status output (CASSANDRA-14787)
diff --git a/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java b/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
index 2998b77..6c49b9a 100644
--- a/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
+++ b/src/java/org/apache/cassandra/db/virtual/AbstractVirtualTable.java
@@ -42,7 +42,7 @@ import org.apache.cassandra.schema.TableMetadata;
  */
 public abstract class AbstractVirtualTable implements VirtualTable
 {
-    private final TableMetadata metadata;
+    protected final TableMetadata metadata;
 
     protected AbstractVirtualTable(TableMetadata metadata)
     {
diff --git a/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java b/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java
index bf40140..6cead97 100644
--- a/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java
+++ b/src/java/org/apache/cassandra/db/virtual/SimpleDataSet.java
@@ -73,6 +73,8 @@ public class SimpleDataSet extends AbstractVirtualTable.AbstractDataSet
     {
         if (null == currentRow)
             throw new IllegalStateException();
+        if (value == null || columnName == null)
+            throw new IllegalStateException(String.format("Invalid column: %s=%s for %s", columnName, value, currentRow));
         currentRow.add(columnName, value);
         return this;
     }
@@ -181,6 +183,11 @@ public class SimpleDataSet extends AbstractVirtualTable.AbstractDataSet
 
             return builder.build();
         }
+
+        public String toString()
+        {
+            return "Row[...:" + clustering.toString(metadata)+']';
+        }
     }
 
     @SuppressWarnings("unchecked")
diff --git a/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java b/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java
index acae2d0..4a043ad 100644
--- a/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java
+++ b/src/java/org/apache/cassandra/db/virtual/TableMetricTables.java
@@ -18,29 +18,25 @@
 
 package org.apache.cassandra.db.virtual;
 
+import java.math.BigDecimal;
 import java.util.Collection;
 import java.util.function.Function;
 
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
+import org.apache.commons.math3.util.Precision;
 
-import com.codahale.metrics.Counter;
 import com.codahale.metrics.Counting;
 import com.codahale.metrics.Gauge;
-import com.codahale.metrics.Histogram;
 import com.codahale.metrics.Metered;
 import com.codahale.metrics.Metric;
 import com.codahale.metrics.Sampling;
 import com.codahale.metrics.Snapshot;
-import com.codahale.metrics.Timer;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.db.marshal.DoubleType;
-import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.LongType;
-import org.apache.cassandra.db.marshal.ReversedType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.LocalPartitioner;
@@ -55,13 +51,14 @@ public class TableMetricTables
 {
     private final static String KEYSPACE_NAME = "keyspace_name";
     private final static String TABLE_NAME = "table_name";
-    private final static String MEDIAN = "median";
+    private final static String P50 = "50th";
     private final static String P99 = "99th";
     private final static String MAX = "max";
     private final static String RATE = "per_second";
+    private final static double BYTES_TO_MIB = 1.0 / (1024 * 1024);
+    private final static double NS_TO_MS = 0.000001;
 
-    private final static AbstractType<?> TYPE = CompositeType.getInstance(ReversedType.getInstance(LongType.instance),
-                                                                          UTF8Type.instance,
+    private final static AbstractType<?> TYPE = CompositeType.getInstance(UTF8Type.instance,
                                                                           UTF8Type.instance);
     private final static IPartitioner PARTITIONER = new LocalPartitioner(TYPE);
 
@@ -71,48 +68,188 @@ public class TableMetricTables
     public static Collection<VirtualTable> getAll(String name)
     {
         return ImmutableList.of(
-        getMetricTable(name, "local_reads", t -> t.readLatency.latency),
-        getMetricTable(name, "local_scans", t -> t.rangeLatency.latency),
-        getMetricTable(name, "coordinator_reads", t -> t.coordinatorReadLatency),
-        getMetricTable(name, "coordinator_scans", t -> t.coordinatorScanLatency),
-        getMetricTable(name, "local_writes", t -> t.writeLatency.latency),
-        getMetricTable(name, "coordinator_writes", t -> t.coordinatorWriteLatency),
-        getMetricTable(name, "tombstones_scanned", t -> t.tombstoneScannedHistogram.cf),
-        getMetricTable(name, "live_scanned", t -> t.liveScannedHistogram.cf),
-        getMetricTable(name, "disk_usage", t -> t.totalDiskSpaceUsed, "disk_space"),
-        getMetricTable(name, "max_partition_size", t -> t.maxPartitionSize, "max_partition_size"));
+            new LatencyTableMetric(name, "local_read_latency", t -> t.readLatency.latency),
+            new LatencyTableMetric(name, "local_scan_latency", t -> t.rangeLatency.latency),
+            new LatencyTableMetric(name, "coordinator_read_latency", t -> t.coordinatorReadLatency),
+            new LatencyTableMetric(name, "coordinator_scan_latency", t -> t.coordinatorScanLatency),
+            new LatencyTableMetric(name, "local_write_latency", t -> t.writeLatency.latency),
+            new LatencyTableMetric(name, "coordinator_write_latency", t -> t.coordinatorWriteLatency),
+            new HistogramTableMetric(name, "tombstones_per_read", t -> t.tombstoneScannedHistogram.cf),
+            new HistogramTableMetric(name, "rows_per_read", t -> t.liveScannedHistogram.cf),
+            new StorageTableMetric(name, "disk_usage", (TableMetrics t) -> t.totalDiskSpaceUsed),
+            new StorageTableMetric(name, "max_partition_size", (TableMetrics t) -> t.maxPartitionSize));
     }
 
-    public static VirtualTable getMetricTable(String keyspace, String table, Function<TableMetrics, Metric> func)
+    /**
+     * A table that describes an amount of disk space in a Counter or Gauge
+     */
+    private static class StorageTableMetric extends TableMetricTable
+    {
+        interface GaugeFunction extends Function<TableMetrics, Gauge<Long>> {}
+        interface CountingFunction<M extends Metric & Counting> extends Function<TableMetrics, M> {}
+
+        <M extends Metric & Counting> StorageTableMetric(String keyspace, String table, CountingFunction<M> func)
+        {
+            super(keyspace, table, func, "mebibytes", LongType.instance, "");
+        }
+
+        StorageTableMetric(String keyspace, String table, GaugeFunction func)
+        {
+            super(keyspace, table, func, "mebibytes", LongType.instance, "");
+        }
+
+        /**
+         * Convert bytes to mebibytes, always round up to nearest MiB
+         */
+        public void add(SimpleDataSet result, String column, long value)
+        {
+            result.column(column, (long) Math.ceil(value * BYTES_TO_MIB));
+        }
+    }
+
+    /**
+     * A table that describes a Histogram metric, or any other metric implementing Sampling
+     */
+    private static class HistogramTableMetric extends TableMetricTable
+    {
+        <M extends Metric & Sampling> HistogramTableMetric(String keyspace, String table, Function<TableMetrics, M> func)
+        {
+            this(keyspace, table, func, "");
+        }
+
+        <M extends Metric & Sampling> HistogramTableMetric(String keyspace, String table, Function<TableMetrics, M> func, String suffix)
+        {
+            super(keyspace, table, func, "count", LongType.instance, suffix);
+        }
+
+        /**
+         * When displayed in cqlsh, overly precise doubles are rendered in scientific notation, which is hard
+         * to read, so round them off to three decimal places (0.000).
+         */
+        public void add(SimpleDataSet result, String column, double value)
+        {
+            result.column(column, Precision.round(value, 3, BigDecimal.ROUND_HALF_UP));
+        }
+    }
+
+    /**
+     * A table that describes a Latency metric, specifically a Timer
+     */
+    private static class LatencyTableMetric extends HistogramTableMetric
+    {
+        <M extends Metric & Sampling> LatencyTableMetric(String keyspace, String table, Function<TableMetrics, M> func)
+        {
+            super(keyspace, table, func, "_ms");
+        }
+
+        /**
+         * For the metrics that are time based, convert from nanoseconds to milliseconds
+         */
+        public void add(SimpleDataSet result, String column, double value)
+        {
+            if (column.endsWith(suffix))
+                value *= NS_TO_MS;
+
+            super.add(result, column, value);
+        }
+    }
+
+    /**
+     * Abstraction over the Metrics Gauge, Counter, and Timer that will turn it into a (keyspace_name, table_name)
+     * table.
+     */
+    private static class TableMetricTable extends AbstractVirtualTable
     {
-        return getMetricTable(keyspace, table, func, "count");
+        final Function<TableMetrics, ? extends Metric> func;
+        final String columnName;
+        final String suffix;
+
+        TableMetricTable(String keyspace, String table, Function<TableMetrics, ? extends Metric> func,
+                                String colName, AbstractType colType, String suffix)
+        {
+            super(buildMetadata(keyspace, table, func, colName, colType, suffix));
+            this.func = func;
+            this.columnName = colName;
+            this.suffix = suffix;
+        }
+
+        public void add(SimpleDataSet result, String column, double value)
+        {
+            result.column(column, value);
+        }
+
+        public void add(SimpleDataSet result, String column, long value)
+        {
+        result.column(column, value);
+        }
+
+        public DataSet data()
+        {
+            SimpleDataSet result = new SimpleDataSet(metadata());
+
+            // Iterate over all tables and get metric by function
+            for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
+            {
+                Metric metric = func.apply(cfs.metric);
+
+                // set new partition for this table
+                result.row(cfs.keyspace.getName(), cfs.name);
+
+                // extract information by metric type and put it in row based on implementation of `add`
+                if (metric instanceof Counting)
+                {
+                    add(result, columnName, ((Counting) metric).getCount());
+                    if (metric instanceof Sampling)
+                    {
+                        Sampling histo = (Sampling) metric;
+                        Snapshot snapshot = histo.getSnapshot();
+                        // EstimatedHistogram keeps these in ns, which is hard for a human to parse, so latency tables convert to ms
+                        add(result, P50 + suffix, snapshot.getMedian());
+                        add(result, P99 + suffix, snapshot.get99thPercentile());
+                        add(result, MAX + suffix, (double) snapshot.getMax());
+                    }
+                    if (metric instanceof Metered)
+                    {
+                        Metered timer = (Metered) metric;
+                        add(result, RATE, timer.getFiveMinuteRate());
+                    }
+                }
+                else if (metric instanceof Gauge)
+                {
+                    add(result, columnName, (long) ((Gauge) metric).getValue());
+                }
+            }
+            return result;
+        }
     }
 
     /**
-     * Abstraction over the Metrics Gauge, Counter, and Timer that will turn it into a ([pk], keyspace_name, table_name)
-     * table. The primary key (default 'count') is in descending orde in order to visually sort the rows when selecting
-     * the entire table in CQLSH.
+     *  Identify the type of Metric (gauge, counter etc) and create the TableMetadata. The column name
+     *  and type for a counter/gauge are formatted differently based on the units (bytes/time), so they are
+     *  allowed to be set.
      */
-    public static VirtualTable getMetricTable(String keyspace, String table, Function<TableMetrics, Metric> func, String pk)
+    private static TableMetadata buildMetadata(String keyspace, String table, Function<TableMetrics, ? extends Metric> func,
+                                              String colName, AbstractType colType, String suffix)
     {
         TableMetadata.Builder metadata = TableMetadata.builder(keyspace, table)
                                                       .kind(TableMetadata.Kind.VIRTUAL)
-                                                      .addPartitionKeyColumn(pk, ReversedType.getInstance(LongType.instance))
                                                       .addPartitionKeyColumn(KEYSPACE_NAME, UTF8Type.instance)
                                                       .addPartitionKeyColumn(TABLE_NAME, UTF8Type.instance)
                                                       .partitioner(PARTITIONER);
 
+        // grab a metric from a table in a system keyspace to determine the type of metric
         Keyspace system = Keyspace.system().iterator().next();
-
-        // Identify the type of Metric it is (gauge, counter etc) and verify the types work
         Metric test = func.apply(system.getColumnFamilyStores().iterator().next().metric);
-        if(test instanceof Counting)
+
+        if (test instanceof Counting)
         {
+            metadata.addRegularColumn(colName, colType);
+            // if it has a Histogram, include some information about the distribution
             if (test instanceof Sampling)
             {
-                metadata.addRegularColumn(MEDIAN, LongType.instance)
-                        .addRegularColumn(P99, LongType.instance)
-                        .addRegularColumn(MAX, LongType.instance);
+                metadata.addRegularColumn(P50 + suffix, DoubleType.instance)
+                        .addRegularColumn(P99 + suffix, DoubleType.instance)
+                        .addRegularColumn(MAX + suffix, DoubleType.instance);
             }
             if (test instanceof Metered)
             {
@@ -121,49 +258,8 @@ public class TableMetricTables
         }
         else if (test instanceof Gauge)
         {
-            Preconditions.checkArgument(((Gauge) test).getValue().getClass().isAssignableFrom(Long.class));
+            metadata.addRegularColumn(colName, colType);
         }
-
-        // Create the VirtualTable that will walk through all tables and get the Metric for each to build the tables
-        // SimpleDataSet
-        return new AbstractVirtualTable(metadata.build())
-        {
-            public DataSet data()
-            {
-                SimpleDataSet result = new SimpleDataSet(metadata());
-                for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
-                {
-                    Metric metric = func.apply(cfs.metric);
-
-                    if(metric instanceof Counting)
-                    {
-                        Counting counting = (Counting) metric;
-                        result.row(counting.getCount(), cfs.keyspace.getName(), cfs.name);
-                        if (metric instanceof Sampling)
-                        {
-                            Sampling histo = (Sampling) metric;
-                            Snapshot snapshot = histo.getSnapshot();
-                            result.column(MEDIAN, (long) snapshot.getMedian())
-                                  .column(P99, (long) snapshot.get99thPercentile())
-                                  .column(MAX, (long) snapshot.getMax());
-                        }
-                        if (metric instanceof Metered)
-                        {
-                            Metered timer = (Metered) metric;
-                            result.column(RATE, timer.getFiveMinuteRate());
-                        }
-                    }
-                    else if (metric instanceof Gauge)
-                    {
-                        result.row(((Gauge) metric).getValue(), cfs.keyspace.getName(), cfs.name);
-                    }
-                    else if (metric instanceof Counter)
-                    {
-                        result.row(((Counter) metric).getCount(), cfs.keyspace.getName(), cfs.name);
-                    }
-                }
-                return result;
-            }
-        };
+        return metadata.build();
     }
 }
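For reference, the instanceof dispatch in TableMetricTable.data() leans on the Codahale/Dropwizard metric
interfaces: Metered extends Counting, so a Timer satisfies the Counting, Sampling and Metered branches, while
a plain Gauge only supplies its single value column. A self-contained sketch (not part of the patch; the class
name and sample updates are made up) showing those branches against a single Timer, and why the snapshot
values start out in nanoseconds:

    import java.util.concurrent.TimeUnit;

    import com.codahale.metrics.Counting;
    import com.codahale.metrics.Metered;
    import com.codahale.metrics.Sampling;
    import com.codahale.metrics.Snapshot;
    import com.codahale.metrics.Timer;

    public class MetricDispatchSketch
    {
        public static void main(String[] args)
        {
            // record two hypothetical request latencies
            Timer timer = new Timer();
            timer.update(3, TimeUnit.MILLISECONDS);
            timer.update(5, TimeUnit.MILLISECONDS);

            Object metric = timer;

            if (metric instanceof Counting)
                System.out.println("count = " + ((Counting) metric).getCount());          // 2

            if (metric instanceof Sampling)
            {
                // Timer snapshots are kept in nanoseconds, hence the NS_TO_MS conversion above
                Snapshot snapshot = ((Sampling) metric).getSnapshot();
                System.out.println("50th (ns) = " + snapshot.getMedian());
                System.out.println("99th (ns) = " + snapshot.get99thPercentile());
                System.out.println("max (ns)  = " + snapshot.getMax());
            }

            if (metric instanceof Metered)
                System.out.println("per_second = " + ((Metered) metric).getFiveMinuteRate());
        }
    }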

