You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by ma...@apache.org on 2023/02/10 14:47:59 UTC
[iotdb] branch IOTDB-5517 updated: create and remove the gauge in the DiskMetrics and redefine the interface
This is an automated email from the ASF dual-hosted git repository.
marklau99 pushed a commit to branch IOTDB-5517
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/IOTDB-5517 by this push:
new 1c9c7694d9 create and remove the gauge in the DiskMetrics and redefine the interface
1c9c7694d9 is described below
commit 1c9c7694d9dc99e8e66d2f807dd9301ee3682532
Author: LiuXuxin <li...@outlook.com>
AuthorDate: Fri Feb 10 22:47:46 2023 +0800
create and remove the gauge in the DiskMetrics and redefine the interface
---
.../iotdb/commons/service/metric/enums/Metric.java | 7 +
.../db/service/metrics/DataNodeMetricsHelper.java | 1 +
.../iotdb/db/service/metrics/DiskMetrics.java | 315 ++++++++++++++++++++-
.../metrics/io/AbstractDiskMetricsManager.java | 24 +-
.../metrics/io/LinuxDiskMetricsManager.java | 37 +--
.../service/metrics/io/MacDiskMetricsManager.java | 59 ++--
.../metrics/io/WindowsDiskMetricsManager.java | 59 ++--
7 files changed, 391 insertions(+), 111 deletions(-)
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
index b7cd68dc5e..391cda3c98 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
@@ -26,6 +26,13 @@ public enum Metric {
QUEUE,
FILE_SIZE,
FILE_COUNT,
+ DISK_IO_SIZE,
+ DISK_IO_OPS,
+ DISK_IO_TIME,
+ DISK_IO_SECTOR_SIZE,
+ PROCESS_IO_SIZE,
+ PROCESS_IO_OPS,
+ PROCESS_IO_TIME,
MEM,
CACHE,
CACHE_HIT,
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
index 538ac38876..73aab0cb5b 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/DataNodeMetricsHelper.java
@@ -39,6 +39,7 @@ public class DataNodeMetricsHelper {
MetricService.getInstance().addMetricSet(new CompactionMetrics());
MetricService.getInstance().addMetricSet(new ProcessMetrics());
MetricService.getInstance().addMetricSet(new SystemMetrics(true));
+ MetricService.getInstance().addMetricSet(new DiskMetrics());
// bind query related metrics
MetricService.getInstance().addMetricSet(new QueryPlanCostMetricSet());
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java
index a0f11ee28a..939547bc0f 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java
@@ -19,14 +19,325 @@
package org.apache.iotdb.db.service.metrics;
+import org.apache.iotdb.commons.service.metric.enums.Metric;
+import org.apache.iotdb.commons.service.metric.enums.Tag;
+import org.apache.iotdb.db.service.metrics.io.AbstractDiskMetricsManager;
import org.apache.iotdb.metrics.AbstractMetricService;
import org.apache.iotdb.metrics.metricsets.IMetricSet;
+import org.apache.iotdb.metrics.utils.MetricLevel;
+import org.apache.iotdb.metrics.utils.MetricType;
+
+import java.util.Set;
public class DiskMetrics implements IMetricSet {
+ private final AbstractDiskMetricsManager diskMetricsManager =
+ AbstractDiskMetricsManager.getDiskMetricsManager();
@Override
- public void bindTo(AbstractMetricService metricService) {}
+ public void bindTo(AbstractMetricService metricService) {
+ // metrics for disks
+ Set<String> diskIDs = diskMetricsManager.getDiskIDs();
+ for (String diskID : diskIDs) {
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_SIZE.toString(),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_SIZE.toString(),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_OPS.toString(),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_OPS.toString(),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "avg_read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "avg_write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_SECTOR_SIZE.toString(),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_SECTOR_SIZE.toString(),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ }
+
+ // metrics for datanode and config node
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_SIZE.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "read");
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_SIZE.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "write");
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_OPS.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "read");
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_OPS.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "write");
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "read");
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "write");
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "avg_read");
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.PROCESS_IO_TIME.toString(),
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "avg_write");
+ }
@Override
- public void unbindFrom(AbstractMetricService metricService) {}
+ public void unbindFrom(AbstractMetricService metricService) {
+ // metrics for disks
+ Set<String> diskIDs = diskMetricsManager.getDiskIDs();
+ for (String diskID : diskIDs) {
+ metricService.createAutoGauge(
+ Metric.DISK_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getReadDataSizeForDisk().getOrDefault(diskID, 0L),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getWriteDataSizeForDisk().getOrDefault(diskID, 0L),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getReadOperationCountForDisk().getOrDefault(diskID, 0),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getWriteOperationCountForDisk().getOrDefault(diskID, 0),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getReadCostTimeForDisk().getOrDefault(diskID, 0L),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getWriteCostTimeForDisk().getOrDefault(diskID, 0L),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgReadCostTimeOfEachOpsForDisk().getOrDefault(diskID, 0.0).longValue(),
+ Tag.NAME.toString(),
+ "avg_read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgWriteCostTimeOfEachOpsForDisk().getOrDefault(diskID, 0.0).longValue(),
+ Tag.NAME.toString(),
+ "avg_write",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_SECTOR_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgSectorSizeOfEachReadForDisk().getOrDefault(diskID, 0.0).longValue(),
+ Tag.NAME.toString(),
+ "read",
+ Tag.NAME.toString(),
+ diskID);
+ metricService.createAutoGauge(
+ Metric.DISK_IO_SECTOR_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgSectorSizeOfEachWriteForDisk().getOrDefault(diskID, 0.0).longValue(),
+ Tag.NAME.toString(),
+ "write",
+ Tag.NAME.toString(),
+ diskID);
+ }
+
+ // metrics for datanode and config node
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getReadDataSizeForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "read");
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getWriteDataSizeForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "write");
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getReadOpsCountForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "read");
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getWriteOpsCountForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "write");
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getReadCostTimeForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "read");
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getWriteCostTimeForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "write");
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getAvgReadCostTimeOfEachOpsForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "avg_read");
+ metricService.createAutoGauge(
+ Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getAvgWriteCostTimeOfEachOpsForDataNode,
+ Tag.NAME.toString(),
+ "datanode",
+ Tag.NAME.toString(),
+ "avg_write");
+ }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java
index c26153d5f0..62fda3e2de 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java
@@ -20,20 +20,18 @@
package org.apache.iotdb.db.service.metrics.io;
import java.util.Map;
+import java.util.Set;
public abstract class AbstractDiskMetricsManager {
- public abstract Map<String, Double> getReadOpsRateForDisk();
-
- public abstract Map<String, Double> getWriteOpsRateForDisk();
-
- public abstract Map<String, Double> getReadThroughputForDisk();
-
- public abstract Map<String, Double> getWriteThroughPutForDisk();
public abstract Map<String, Long> getReadDataSizeForDisk();
public abstract Map<String, Long> getWriteDataSizeForDisk();
+ public abstract Map<String, Integer> getReadOperationCountForDisk();
+
+ public abstract Map<String, Integer> getWriteOperationCountForDisk();
+
public abstract Map<String, Long> getReadCostTimeForDisk();
public abstract Map<String, Long> getWriteCostTimeForDisk();
@@ -50,13 +48,9 @@ public abstract class AbstractDiskMetricsManager {
public abstract long getWriteDataSizeForDataNode();
- public abstract double getReadThroughputForDataNode();
-
- public abstract double getWriteThroughputForDataNode();
+ public abstract long getReadOpsCountForDataNode();
- public abstract long getReadOpsRateForDataNode();
-
- public abstract long getWriteOpsRateForDataNode();
+ public abstract long getWriteOpsCountForDataNode();
public abstract long getReadCostTimeForDataNode();
@@ -66,6 +60,8 @@ public abstract class AbstractDiskMetricsManager {
public abstract long getAvgWriteCostTimeOfEachOpsForDataNode();
+ public abstract Set<String> getDiskIDs();
+
/**
* Return different implementation of DiskMetricsManager according to OS type.
*
@@ -74,7 +70,7 @@ public abstract class AbstractDiskMetricsManager {
public static AbstractDiskMetricsManager getDiskMetricsManager() {
String os = System.getProperty("os.name");
if (os == null) {
- throw new RuntimeException("Cannot get the os type");
+ throw new RuntimeException("Cannot get the name of operating system");
}
if (os.startsWith("windows")) {
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java
index c6d7a51dc0..c505f27a6c 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java
@@ -20,35 +20,27 @@
package org.apache.iotdb.db.service.metrics.io;
import java.util.Map;
+import java.util.Set;
public class LinuxDiskMetricsManager extends AbstractDiskMetricsManager {
- @Override
- public Map<String, Double> getReadOpsRateForDisk() {
- return null;
- }
-
- @Override
- public Map<String, Double> getWriteOpsRateForDisk() {
- return null;
- }
@Override
- public Map<String, Double> getReadThroughputForDisk() {
+ public Map<String, Long> getReadDataSizeForDisk() {
return null;
}
@Override
- public Map<String, Double> getWriteThroughPutForDisk() {
+ public Map<String, Long> getWriteDataSizeForDisk() {
return null;
}
@Override
- public Map<String, Long> getReadDataSizeForDisk() {
+ public Map<String, Integer> getReadOperationCountForDisk() {
return null;
}
@Override
- public Map<String, Long> getWriteDataSizeForDisk() {
+ public Map<String, Integer> getWriteOperationCountForDisk() {
return null;
}
@@ -93,22 +85,12 @@ public class LinuxDiskMetricsManager extends AbstractDiskMetricsManager {
}
@Override
- public double getReadThroughputForDataNode() {
+ public long getReadOpsCountForDataNode() {
return 0;
}
@Override
- public double getWriteThroughputForDataNode() {
- return 0;
- }
-
- @Override
- public long getReadOpsRateForDataNode() {
- return 0;
- }
-
- @Override
- public long getWriteOpsRateForDataNode() {
+ public long getWriteOpsCountForDataNode() {
return 0;
}
@@ -131,4 +113,9 @@ public class LinuxDiskMetricsManager extends AbstractDiskMetricsManager {
public long getAvgWriteCostTimeOfEachOpsForDataNode() {
return 0;
}
+
+ @Override
+ public Set<String> getDiskIDs() {
+ return null;
+ }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java
index 7bc126376f..9db61a0373 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java
@@ -19,67 +19,61 @@
package org.apache.iotdb.db.service.metrics.io;
+import java.util.Collections;
import java.util.Map;
+import java.util.Set;
+/** Disk Metrics Manager for macOS, not implemented yet. */
public class MacDiskMetricsManager extends AbstractDiskMetricsManager {
- @Override
- public Map<String, Double> getReadOpsRateForDisk() {
- return null;
- }
-
- @Override
- public Map<String, Double> getWriteOpsRateForDisk() {
- return null;
- }
@Override
- public Map<String, Double> getReadThroughputForDisk() {
- return null;
+ public Map<String, Long> getReadDataSizeForDisk() {
+ return Collections.emptyMap();
}
@Override
- public Map<String, Double> getWriteThroughPutForDisk() {
- return null;
+ public Map<String, Long> getWriteDataSizeForDisk() {
+ return Collections.emptyMap();
}
@Override
- public Map<String, Long> getReadDataSizeForDisk() {
- return null;
+ public Map<String, Integer> getReadOperationCountForDisk() {
+ return Collections.emptyMap();
}
@Override
- public Map<String, Long> getWriteDataSizeForDisk() {
- return null;
+ public Map<String, Integer> getWriteOperationCountForDisk() {
+ return Collections.emptyMap();
}
@Override
public Map<String, Long> getReadCostTimeForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Long> getWriteCostTimeForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgReadCostTimeOfEachOpsForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgWriteCostTimeOfEachOpsForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgSectorSizeOfEachReadForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgSectorSizeOfEachWriteForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
@@ -93,22 +87,12 @@ public class MacDiskMetricsManager extends AbstractDiskMetricsManager {
}
@Override
- public double getReadThroughputForDataNode() {
- return 0;
- }
-
- @Override
- public double getWriteThroughputForDataNode() {
- return 0;
- }
-
- @Override
- public long getReadOpsRateForDataNode() {
+ public long getReadOpsCountForDataNode() {
return 0;
}
@Override
- public long getWriteOpsRateForDataNode() {
+ public long getWriteOpsCountForDataNode() {
return 0;
}
@@ -131,4 +115,9 @@ public class MacDiskMetricsManager extends AbstractDiskMetricsManager {
public long getAvgWriteCostTimeOfEachOpsForDataNode() {
return 0;
}
+
+ @Override
+ public Set<String> getDiskIDs() {
+ return Collections.emptySet();
+ }
}
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java
index a330fb9d0c..b87d4c4470 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java
@@ -19,67 +19,61 @@
package org.apache.iotdb.db.service.metrics.io;
+import java.util.Collections;
import java.util.Map;
+import java.util.Set;
+/** Disk Metrics Manager for Windows system, not implemented yet. */
public class WindowsDiskMetricsManager extends AbstractDiskMetricsManager {
- @Override
- public Map<String, Double> getReadOpsRateForDisk() {
- return null;
- }
-
- @Override
- public Map<String, Double> getWriteOpsRateForDisk() {
- return null;
- }
@Override
- public Map<String, Double> getReadThroughputForDisk() {
- return null;
+ public Map<String, Long> getReadDataSizeForDisk() {
+ return Collections.emptyMap();
}
@Override
- public Map<String, Double> getWriteThroughPutForDisk() {
- return null;
+ public Map<String, Long> getWriteDataSizeForDisk() {
+ return Collections.emptyMap();
}
@Override
- public Map<String, Long> getReadDataSizeForDisk() {
- return null;
+ public Map<String, Integer> getReadOperationCountForDisk() {
+ return Collections.emptyMap();
}
@Override
- public Map<String, Long> getWriteDataSizeForDisk() {
- return null;
+ public Map<String, Integer> getWriteOperationCountForDisk() {
+ return Collections.emptyMap();
}
@Override
public Map<String, Long> getReadCostTimeForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Long> getWriteCostTimeForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgReadCostTimeOfEachOpsForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgWriteCostTimeOfEachOpsForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgSectorSizeOfEachReadForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
public Map<String, Double> getAvgSectorSizeOfEachWriteForDisk() {
- return null;
+ return Collections.emptyMap();
}
@Override
@@ -93,22 +87,12 @@ public class WindowsDiskMetricsManager extends AbstractDiskMetricsManager {
}
@Override
- public double getReadThroughputForDataNode() {
- return 0;
- }
-
- @Override
- public double getWriteThroughputForDataNode() {
- return 0;
- }
-
- @Override
- public long getReadOpsRateForDataNode() {
+ public long getReadOpsCountForDataNode() {
return 0;
}
@Override
- public long getWriteOpsRateForDataNode() {
+ public long getWriteOpsCountForDataNode() {
return 0;
}
@@ -131,4 +115,9 @@ public class WindowsDiskMetricsManager extends AbstractDiskMetricsManager {
public long getAvgWriteCostTimeOfEachOpsForDataNode() {
return 0;
}
+
+ @Override
+ public Set<String> getDiskIDs() {
+ return Collections.emptySet();
+ }
}