This is an automated email from the ASF dual-hosted git repository.
marklau99 pushed a commit to branch IOTDB-5517
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/IOTDB-5517 by this push:
new 34b40793dd finish the part of disk io metrics
34b40793dd is described below
commit 34b40793dd3aec70415f6e81993b950682dca567
Author: LiuXuxin <[email protected]>
AuthorDate: Sat Feb 11 19:00:21 2023 +0800
finish the part of disk io metrics
---
.../iotdb/commons/service/metric/enums/Metric.java | 2 +-
.../iotdb/db/service/metrics/DiskMetrics.java | 224 ++++++++++-----------
.../metrics/io/AbstractDiskMetricsManager.java | 4 +-
.../metrics/io/LinuxDiskMetricsManager.java | 205 +++++++++++++++++--
.../service/metrics/io/MacDiskMetricsManager.java | 4 +-
.../metrics/io/WindowsDiskMetricsManager.java | 4 +-
6 files changed, 311 insertions(+), 132 deletions(-)
diff --git a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
index 391cda3c98..acff8839f6 100644
--- a/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
+++ b/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/enums/Metric.java
@@ -29,7 +29,7 @@ public enum Metric {
DISK_IO_SIZE,
DISK_IO_OPS,
DISK_IO_TIME,
- DISK_IO_SECTOR_SIZE,
+ DISK_IO_SECTOR_NUM,
PROCESS_IO_SIZE,
PROCESS_IO_OPS,
PROCESS_IO_TIME,
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java
index 939547bc0f..3ac3f087ec 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/DiskMetrics.java
@@ -38,72 +38,92 @@ public class DiskMetrics implements IMetricSet {
// metrics for disks
Set<String> diskIDs = diskMetricsManager.getDiskIDs();
for (String diskID : diskIDs) {
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getReadDataSizeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getWriteDataSizeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getReadOperationCountForDisk().getOrDefault(diskID, 0),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getWriteOperationCountForDisk().getOrDefault(diskID, 0),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getReadCostTimeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getWriteCostTimeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgReadCostTimeOfEachOpsForDisk().getOrDefault(diskID, 0.0).longValue(),
Tag.NAME.toString(),
"avg_read",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.DISK_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgWriteCostTimeOfEachOpsForDisk().getOrDefault(diskID, 0.0).longValue(),
Tag.NAME.toString(),
"avg_write",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
- Metric.DISK_IO_SECTOR_SIZE.toString(),
+ metricService.createAutoGauge(
+ Metric.DISK_IO_SECTOR_NUM.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgSectorCountOfEachReadForDisk().getOrDefault(diskID, 0.0).longValue(),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.remove(
- MetricType.AUTO_GAUGE,
- Metric.DISK_IO_SECTOR_SIZE.toString(),
+ metricService.createAutoGauge(
+ Metric.DISK_IO_SECTOR_NUM.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ x -> x.getAvgSectorCountOfEachWriteForDisk().getOrDefault(diskID, 0.0).longValue(),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
@@ -111,58 +131,74 @@ public class DiskMetrics implements IMetricSet {
}
// metrics for datanode and config node
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getReadDataSizeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"read");
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_SIZE.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getWriteDataSizeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"write");
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getReadOpsCountForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"read");
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_OPS.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getWriteOpsCountForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"write");
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getReadCostTimeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"read");
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getWriteCostTimeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"write");
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getAvgReadCostTimeOfEachOpsForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"avg_read");
- metricService.remove(
- MetricType.AUTO_GAUGE,
+ metricService.createAutoGauge(
Metric.PROCESS_IO_TIME.toString(),
+ MetricLevel.IMPORTANT,
+ diskMetricsManager,
+ AbstractDiskMetricsManager::getAvgWriteCostTimeOfEachOpsForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
@@ -174,92 +210,72 @@ public class DiskMetrics implements IMetricSet {
// metrics for disks
Set<String> diskIDs = diskMetricsManager.getDiskIDs();
for (String diskID : diskIDs) {
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_SIZE.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getReadDataSizeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_SIZE.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getWriteDataSizeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_OPS.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getReadOperationCountForDisk().getOrDefault(diskID, 0),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_OPS.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getWriteOperationCountForDisk().getOrDefault(diskID, 0),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getReadCostTimeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getWriteCostTimeForDisk().getOrDefault(diskID, 0L),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getAvgReadCostTimeOfEachOpsForDisk().getOrDefault(diskID, 0.0).longValue(),
Tag.NAME.toString(),
"avg_read",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.DISK_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getAvgWriteCostTimeOfEachOpsForDisk().getOrDefault(diskID, 0.0).longValue(),
Tag.NAME.toString(),
"avg_write",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
- Metric.DISK_IO_SECTOR_SIZE.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getAvgSectorSizeOfEachReadForDisk().getOrDefault(diskID, 0.0).longValue(),
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_SECTOR_NUM.toString(),
Tag.NAME.toString(),
"read",
Tag.NAME.toString(),
diskID);
- metricService.createAutoGauge(
- Metric.DISK_IO_SECTOR_SIZE.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- x -> x.getAvgSectorSizeOfEachWriteForDisk().getOrDefault(diskID, 0.0).longValue(),
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
+ Metric.DISK_IO_SECTOR_NUM.toString(),
Tag.NAME.toString(),
"write",
Tag.NAME.toString(),
@@ -267,74 +283,58 @@ public class DiskMetrics implements IMetricSet {
}
// metrics for datanode and config node
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_SIZE.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getReadDataSizeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"read");
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_SIZE.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getWriteDataSizeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"write");
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_OPS.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getReadOpsCountForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"read");
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_OPS.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getWriteOpsCountForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"write");
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getReadCostTimeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"read");
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getWriteCostTimeForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"write");
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getAvgReadCostTimeOfEachOpsForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
"avg_read");
- metricService.createAutoGauge(
+ metricService.remove(
+ MetricType.AUTO_GAUGE,
Metric.PROCESS_IO_TIME.toString(),
- MetricLevel.IMPORTANT,
- diskMetricsManager,
- AbstractDiskMetricsManager::getAvgWriteCostTimeOfEachOpsForDataNode,
Tag.NAME.toString(),
"datanode",
Tag.NAME.toString(),
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java
index 62fda3e2de..9ffe97c75e 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/AbstractDiskMetricsManager.java
@@ -40,9 +40,9 @@ public abstract class AbstractDiskMetricsManager {
public abstract Map<String, Double> getAvgWriteCostTimeOfEachOpsForDisk();
- public abstract Map<String, Double> getAvgSectorSizeOfEachReadForDisk();
+ public abstract Map<String, Double> getAvgSectorCountOfEachReadForDisk();
- public abstract Map<String, Double> getAvgSectorSizeOfEachWriteForDisk();
+ public abstract Map<String, Double> getAvgSectorCountOfEachWriteForDisk();
public abstract long getReadDataSizeForDataNode();
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java
index c505f27a6c..14e4a27600 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/LinuxDiskMetricsManager.java
@@ -19,59 +19,142 @@
package org.apache.iotdb.db.service.metrics.io;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
+import java.util.Objects;
+import java.util.Scanner;
import java.util.Set;
+import java.util.stream.Collectors;
public class LinuxDiskMetricsManager extends AbstractDiskMetricsManager {
+ private final String DISK_STATS_FILE_PATH = "/proc/diskstats";
+ private final String DISK_ID_PATH = "/sys/block";
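+ // Column offsets within a /proc/diskstats line after splitting on whitespace;
+ // the leading spaces produce an empty token at index 0, so the device name
+ // lands at index 3 and the kernel's iostats fields (reads, merged reads,
+ // sectors read, read ms, writes, merged writes, sectors written, write ms)
+ // follow from index 4 onwards.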
+ private final int DISK_ID_OFFSET = 3;
+ private final int DISK_READ_COUNT_OFFSET = 4;
+ private final int DISK_MERGED_READ_COUNT_OFFSET = 5;
+ private final int DISK_SECTOR_READ_COUNT_OFFSET = 6;
+ private final int DISK_READ_TIME_COST_OFFSET = 7;
+ private final int DISK_WRITE_COUNT_OFFSET = 8;
+ private final int DISK_MERGED_WRITE_COUNT_OFFSET = 9;
+ private final int DISK_SECTOR_WRITE_COUNT_OFFSET = 10;
+ private final int DISK_WRITE_TIME_COST_OFFSET = 11;
+ private final int DISK_IO_TOTAL_TIME_OFFSET = 13;
+ private final long UPDATE_INTERVAL = 10000L;
+ private Set<String> diskIDSet;
+ private long lastUpdateTime = 0L;
+ private String[] dataNodeProcessId;
+ private String[] configNodeProcessId;
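+ // The "last*" maps cache the previous cumulative counters sampled from
+ // /proc/diskstats; the "increment*" maps hold the deltas between the two
+ // most recent samples, which is what the gauges ultimately report.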
+ private final Map<String, Integer> lastReadOperationCountForDisk = new HashMap<>();
+ private final Map<String, Integer> lastWriteOperationCountForDisk = new HashMap<>();
+ private final Map<String, Long> lastReadTimeCostForDisk = new HashMap<>();
+ private final Map<String, Long> lastWriteTimeCostForDisk = new HashMap<>();
+ private final Map<String, Long> lastReadSectorCountForDisk = new HashMap<>();
+ private final Map<String, Long> lastWriteSectorCountForDisk = new HashMap<>();
+ private final Map<String, Integer> incrementReadOperationCountForDisk = new HashMap<>();
+ private final Map<String, Integer> incrementWriteOperationCountForDisk = new HashMap<>();
+ private final Map<String, Long> incrementReadTimeCostForDisk = new HashMap<>();
+ private final Map<String, Long> incrementWriteTimeCostForDisk = new HashMap<>();
+ private final Map<String, Long> incrementReadSectorCountForDisk = new HashMap<>();
+ private final Map<String, Long> incrementWriteSectorCountForDisk = new HashMap<>();
+
+ public LinuxDiskMetricsManager() {}
@Override
public Map<String, Long> getReadDataSizeForDisk() {
- return null;
+ checkUpdate();
+ Map<String, Long> readDataMap = new HashMap<>();
+ for (Map.Entry<String, Long> entry : incrementReadSectorCountForDisk.entrySet()) {
+ // each sector holds 512 bytes, so dividing by 1024 reports the size in KB
+ readDataMap.put(entry.getKey(), entry.getValue() * 512L / 1024L);
+ }
+ return readDataMap;
}
@Override
public Map<String, Long> getWriteDataSizeForDisk() {
- return null;
+ checkUpdate();
+ Map<String, Long> writeDataMap = new HashMap<>();
+ for (Map.Entry<String, Long> entry : incrementWriteSectorCountForDisk.entrySet()) {
+ // each sector holds 512 bytes, so dividing by 1024 reports the size in KB
+ writeDataMap.put(entry.getKey(), entry.getValue() * 512L / 1024L);
+ }
+ return writeDataMap;
}
@Override
public Map<String, Integer> getReadOperationCountForDisk() {
- return null;
+ checkUpdate();
+ return incrementReadOperationCountForDisk;
}
@Override
public Map<String, Integer> getWriteOperationCountForDisk() {
- return null;
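+ // As written, this relies on checkUpdate() having been triggered by another
+ // getter (e.g. getReadOperationCountForDisk) in the same collection pass.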
+ return incrementWriteOperationCountForDisk;
}
@Override
public Map<String, Long> getReadCostTimeForDisk() {
- return null;
+ return incrementReadTimeCostForDisk;
}
@Override
public Map<String, Long> getWriteCostTimeForDisk() {
- return null;
+ return incrementWriteTimeCostForDisk;
}
@Override
public Map<String, Double> getAvgReadCostTimeOfEachOpsForDisk() {
- return null;
+ Map<String, Double> avgReadTimeCostMap = new HashMap<>();
+ for (Map.Entry<String, Long> readCostEntry : incrementReadTimeCostForDisk.entrySet()) {
+ int readOpsCount = incrementReadOperationCountForDisk.getOrDefault(readCostEntry.getKey(), 1);
+ avgReadTimeCostMap.put(
+ readCostEntry.getKey(), (double) readCostEntry.getValue() / readOpsCount);
+ }
+ return avgReadTimeCostMap;
}
@Override
public Map<String, Double> getAvgWriteCostTimeOfEachOpsForDisk() {
- return null;
+ Map<String, Double> avgWriteTimeCostMap = new HashMap<>();
+ for (Map.Entry<String, Long> writeCostEntry : incrementWriteTimeCostForDisk.entrySet()) {
+ int writeOpsCount = incrementWriteOperationCountForDisk.getOrDefault(writeCostEntry.getKey(), 1);
+ avgWriteTimeCostMap.put(
+ writeCostEntry.getKey(), (double) writeCostEntry.getValue() / writeOpsCount);
+ }
+ return avgWriteTimeCostMap;
}
@Override
- public Map<String, Double> getAvgSectorSizeOfEachReadForDisk() {
- return null;
+ public Map<String, Double> getAvgSectorCountOfEachReadForDisk() {
+ Map<String, Double> avgSectorSizeOfRead = new HashMap<>();
+ for (Map.Entry<String, Long> readSectorSizeEntry : incrementReadSectorCountForDisk.entrySet()) {
+ int readOpsCount = incrementReadOperationCountForDisk.getOrDefault(readSectorSizeEntry.getKey(), 1);
+ avgSectorSizeOfRead.put(
+ readSectorSizeEntry.getKey(), ((double) readSectorSizeEntry.getValue()) / readOpsCount);
+ }
+ return avgSectorSizeOfRead;
}
@Override
- public Map<String, Double> getAvgSectorSizeOfEachWriteForDisk() {
- return null;
+ public Map<String, Double> getAvgSectorCountOfEachWriteForDisk() {
+ Map<String, Double> avgSectorSizeOfWrite = new HashMap<>();
+ for (Map.Entry<String, Long> writeSectorSizeEntry : incrementWriteSectorCountForDisk.entrySet()) {
+ int writeOpsCount = incrementWriteOperationCountForDisk.getOrDefault(writeSectorSizeEntry.getKey(), 1);
+ avgSectorSizeOfWrite.put(
+ writeSectorSizeEntry.getKey(),
+ ((double) writeSectorSizeEntry.getValue()) / writeOpsCount);
+ }
+ return avgSectorSizeOfWrite;
}
@Override
@@ -116,6 +199,102 @@ public class LinuxDiskMetricsManager extends AbstractDiskMetricsManager {
@Override
public Set<String> getDiskIDs() {
- return null;
+ File diskIDFolder = new File(DISK_ID_PATH);
+ if (!diskIDFolder.exists()) {
+ return Collections.emptySet();
+ }
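+ // Enumerate /sys/block and drop pseudo devices (loop* and ram*).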
+ diskIDSet =
+ new ArrayList<>(Arrays.asList(Objects.requireNonNull(diskIDFolder.listFiles())))
+ .stream()
+ .filter(x -> !x.getName().startsWith("loop") && !x.getName().startsWith("ram"))
+ .map(File::getName)
+ .collect(Collectors.toSet());
+ return diskIDSet;
+ }
+
+ private void updateDiskInfo() {
+ lastUpdateTime = System.currentTimeMillis();
+ File diskStatsFile = new File(DISK_STATS_FILE_PATH);
+ if (!diskStatsFile.exists()) {
+ return;
+ }
+ try (Scanner diskStatsScanner = new Scanner(Files.newInputStream(diskStatsFile.toPath()))) {
+ while (diskStatsScanner.hasNextLine()) {
+ String[] diskInfo = diskStatsScanner.nextLine().split("\\s+");
+ String diskId = diskInfo[DISK_ID_OFFSET];
+ if (!diskIDSet.contains(diskId)) {
+ continue;
+ }
+ int readOperationCount = Integer.parseInt(diskInfo[DISK_READ_COUNT_OFFSET]);
+ int writeOperationCount = Integer.parseInt(diskInfo[DISK_WRITE_COUNT_OFFSET]);
+ int mergedReadOperationCount = Integer.parseInt(diskInfo[DISK_MERGED_READ_COUNT_OFFSET]);
+ int mergedWriteOperationCount = Integer.parseInt(diskInfo[DISK_MERGED_WRITE_COUNT_OFFSET]);
+ long sectorReadCount = Long.parseLong(diskInfo[DISK_SECTOR_READ_COUNT_OFFSET]);
+ long sectorWriteCount = Long.parseLong(diskInfo[DISK_SECTOR_WRITE_COUNT_OFFSET]);
+ long readTimeCost = Long.parseLong(diskInfo[DISK_READ_TIME_COST_OFFSET]);
+ long writeTimeCost = Long.parseLong(diskInfo[DISK_WRITE_TIME_COST_OFFSET]);
+
+ int lastReadOperationCount = lastReadOperationCountForDisk.getOrDefault(diskId, 0);
+ int lastWriteOperationCount = lastWriteOperationCountForDisk.getOrDefault(diskId, 0);
+ // int lastMergedReadOperationCount = lastM
+ long lastSectorReadCount = lastReadSectorCountForDisk.getOrDefault(diskId, 0L);
+ long lastSectorWriteCount = lastWriteSectorCountForDisk.getOrDefault(diskId, 0L);
+ long lastReadTime = lastReadTimeCostForDisk.getOrDefault(diskId, 0L);
+ long lastWriteTime = lastWriteTimeCostForDisk.getOrDefault(diskId, 0L);
+
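+ // A zero "last" value means there is no baseline sample yet, so report an
+ // increment of 0 instead of the raw cumulative counter.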
+ if (lastReadOperationCount != 0) {
+ incrementReadOperationCountForDisk.put(
+ diskId, readOperationCount - lastReadOperationCount);
+ } else {
+ incrementReadOperationCountForDisk.put(diskId, 0);
+ }
+
+ if (lastWriteOperationCount != 0) {
+ incrementWriteOperationCountForDisk.put(
+ diskId, writeOperationCount - lastWriteOperationCount);
+ } else {
+ incrementWriteOperationCountForDisk.put(diskId, 0);
+ }
+
+ if (lastSectorReadCount != 0) {
+ incrementReadSectorCountForDisk.put(diskId, sectorReadCount - lastSectorReadCount);
+ } else {
+ incrementReadSectorCountForDisk.put(diskId, 0L);
+ }
+
+ if (lastSectorWriteCount != 0) {
+ incrementWriteSectorCountForDisk.put(diskId, sectorWriteCount - lastSectorWriteCount);
+ } else {
+ incrementWriteSectorCountForDisk.put(diskId, 0L);
+ }
+
+ if (lastReadTime != 0) {
+ incrementReadTimeCostForDisk.put(diskId, readTimeCost - lastReadTime);
+ } else {
+ incrementReadTimeCostForDisk.put(diskId, 0L);
+ }
+
+ if (lastWriteTime != 0) {
+ incrementWriteTimeCostForDisk.put(diskId, writeTimeCost - lastWriteTime);
+ } else {
+ incrementWriteTimeCostForDisk.put(diskId, 0L);
+ }
+
+ lastReadOperationCountForDisk.put(diskId, readOperationCount);
+ lastWriteOperationCountForDisk.put(diskId, writeOperationCount);
+ lastReadSectorCountForDisk.put(diskId, sectorReadCount);
+ lastWriteSectorCountForDisk.put(diskId, sectorWriteCount);
+ lastReadTimeCostForDisk.put(diskId, readTimeCost);
+ lastWriteTimeCostForDisk.put(diskId, writeTimeCost);
+ }
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
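+ // Re-read /proc/diskstats at most once per UPDATE_INTERVAL (10 s) so that
+ // polling several gauges does not rescan the file each time.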
+ private void checkUpdate() {
+ if (System.currentTimeMillis() - lastUpdateTime > UPDATE_INTERVAL) {
+ updateDiskInfo();
+ }
}
}
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java
index 9db61a0373..a1ebb3444a 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/MacDiskMetricsManager.java
@@ -67,12 +67,12 @@ public class MacDiskMetricsManager extends AbstractDiskMetricsManager {
}
@Override
- public Map<String, Double> getAvgSectorSizeOfEachReadForDisk() {
+ public Map<String, Double> getAvgSectorCountOfEachReadForDisk() {
return Collections.emptyMap();
}
@Override
- public Map<String, Double> getAvgSectorSizeOfEachWriteForDisk() {
+ public Map<String, Double> getAvgSectorCountOfEachWriteForDisk() {
return Collections.emptyMap();
}
diff --git a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java
index b87d4c4470..aeb4104094 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/metrics/io/WindowsDiskMetricsManager.java
@@ -67,12 +67,12 @@ public class WindowsDiskMetricsManager extends AbstractDiskMetricsManager {
}
@Override
- public Map<String, Double> getAvgSectorSizeOfEachReadForDisk() {
+ public Map<String, Double> getAvgSectorCountOfEachReadForDisk() {
return Collections.emptyMap();
}
@Override
- public Map<String, Double> getAvgSectorSizeOfEachWriteForDisk() {
+ public Map<String, Double> getAvgSectorCountOfEachWriteForDisk() {
return Collections.emptyMap();
}