Posted to commits@kylin.apache.org by xx...@apache.org on 2022/12/05 10:21:03 UTC

[kylin] 11/22: KYLIN-5317 Change parameter kylin.metrics.hdfs-periodic-calculation-enabled to default true

This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch kylin5
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit a79a9b487df43b2f6b8acf1a476d94520d0532e9
Author: Guoliang Sun <gu...@kyligence.io>
AuthorDate: Fri Oct 14 13:52:29 2022 +0800

    KYLIN-5317 Change parameter kylin.metrics.hdfs-periodic-calculation-enabled to default true
---
 .../org/apache/kylin/common/KylinConfigBase.java   |  2 +-
 .../apache/kylin/common/KylinConfigBaseTest.java   |  1 +
 .../apache/kylin/metrics/HdfsCapacityMetrics.java  | 34 ++++++++++++----------
 .../kylin/metrics/HdfsCapacityMetricsTest.java     | 26 +++++++++--------
 4 files changed, 34 insertions(+), 29 deletions(-)

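With the switch now on by default, deployments that want the previous behaviour have to opt out explicitly. A minimal kylin.properties sketch (assuming the usual kylin.properties override mechanism; the value is read with Boolean.parseBoolean, so only a literal "false" disables it):

    # restore the pre-KYLIN-5317 behaviour: no periodic HDFS working-dir capacity calculation
    kylin.metrics.hdfs-periodic-calculation-enabled=false
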
diff --git a/src/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java b/src/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
index acfe61c924..5fe045c115 100644
--- a/src/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
+++ b/src/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
@@ -3616,7 +3616,7 @@ public abstract class KylinConfigBase implements Serializable {
     }
 
     public boolean isHdfsMetricsPeriodicCalculationEnabled() {
-        return Boolean.parseBoolean(getOptional("kylin.metrics.hdfs-periodic-calculation-enabled", FALSE));
+        return Boolean.parseBoolean(getOptional("kylin.metrics.hdfs-periodic-calculation-enabled", TRUE));
     }
 
     public long getHdfsMetricsPeriodicCalculationInterval() {
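The effect of flipping the default can be seen in isolation with a small, self-contained Java sketch (not Kylin code; the hypothetical isEnabled() stands in for isHdfsMetricsPeriodicCalculationEnabled()): an unset property now resolves to true, which is why the test below must set "false" explicitly before asserting the disabled case.

    import java.util.Properties;

    public class DefaultResolutionSketch {
        private static final Properties PROPS = new Properties();

        // Mirrors the getOptional(key, default) + Boolean.parseBoolean idiom above.
        static boolean isEnabled() {
            return Boolean.parseBoolean(
                    PROPS.getProperty("kylin.metrics.hdfs-periodic-calculation-enabled", "true"));
        }

        public static void main(String[] args) {
            System.out.println(isEnabled()); // true  -> property unset, new default applies
            PROPS.setProperty("kylin.metrics.hdfs-periodic-calculation-enabled", "false");
            System.out.println(isEnabled()); // false -> explicit opt-out still works
        }
    }
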
diff --git a/src/core-common/src/test/java/org/apache/kylin/common/KylinConfigBaseTest.java b/src/core-common/src/test/java/org/apache/kylin/common/KylinConfigBaseTest.java
index 3630c7bbd1..16f653d46b 100644
--- a/src/core-common/src/test/java/org/apache/kylin/common/KylinConfigBaseTest.java
+++ b/src/core-common/src/test/java/org/apache/kylin/common/KylinConfigBaseTest.java
@@ -1258,6 +1258,7 @@ class KylinConfigBaseTest {
     @Test
     void testIsHdfsMetricsPeriodicCalculationEnabled() {
         KylinConfig config = KylinConfig.getInstanceFromEnv();
+        config.setProperty("kylin.metrics.hdfs-periodic-calculation-enabled", "false");
         assertFalse(config.isHdfsMetricsPeriodicCalculationEnabled());
         config.setProperty("kylin.metrics.hdfs-periodic-calculation-enabled", "true");
         assertTrue(config.isHdfsMetricsPeriodicCalculationEnabled());
diff --git a/src/core-metadata/src/main/java/org/apache/kylin/metrics/HdfsCapacityMetrics.java b/src/core-metadata/src/main/java/org/apache/kylin/metrics/HdfsCapacityMetrics.java
index d26d509155..9b114779ea 100644
--- a/src/core-metadata/src/main/java/org/apache/kylin/metrics/HdfsCapacityMetrics.java
+++ b/src/core-metadata/src/main/java/org/apache/kylin/metrics/HdfsCapacityMetrics.java
@@ -18,7 +18,15 @@
 
 package org.apache.kylin.metrics;
 
-import lombok.extern.slf4j.Slf4j;
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -32,19 +40,12 @@ import org.apache.kylin.common.util.NamedThreadFactory;
 import org.apache.kylin.metadata.project.NProjectManager;
 import org.apache.kylin.metadata.project.ProjectInstance;
 
-import java.io.IOException;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
+import lombok.extern.slf4j.Slf4j;
 
 /**
  * 1. Unify the entry point for all calculation calls to obtain the capacity of the WorkingDir through scheduled threads
  * 2. Expose two configurations externally:
- * - function enable switch: kylin.metrics.hdfs-periodic-calculation-enabled  - default false
+ * - function enable switch: kylin.metrics.hdfs-periodic-calculation-enabled  - default true
  * - polling time parameter: kylin.metrics.hdfs-periodic-calculation-interval - default 5min
  */
 @Slf4j
@@ -68,7 +69,8 @@ public class HdfsCapacityMetrics {
         SERVICE_INFO = AddressUtil.getLocalInstance();
         WORKING_FS = HadoopUtil.getWorkingFileSystem();
         HDFS_CAPACITY_METRICS_PATH = new Path(KYLIN_CONFIG.getHdfsMetricsDir("hdfsCapacity.json"));
-        HDFS_METRICS_SCHEDULED_EXECUTOR = Executors.newScheduledThreadPool(1, new NamedThreadFactory("HdfsMetricsChecker"));
+        HDFS_METRICS_SCHEDULED_EXECUTOR = Executors.newScheduledThreadPool(1,
+                new NamedThreadFactory("HdfsMetricsChecker"));
         registerHdfsMetrics();
     }
 
@@ -85,8 +87,8 @@ public class HdfsCapacityMetrics {
         hdfsMetricsPeriodicCalculationEnabled = KYLIN_CONFIG.isHdfsMetricsPeriodicCalculationEnabled();
         if (hdfsMetricsPeriodicCalculationEnabled) {
             log.info("HDFS metrics periodic calculation is enabled, path: {}", HDFS_CAPACITY_METRICS_PATH);
-            HDFS_METRICS_SCHEDULED_EXECUTOR.scheduleAtFixedRate(HdfsCapacityMetrics::handleNodeHdfsMetrics,
-                    0, KYLIN_CONFIG.getHdfsMetricsPeriodicCalculationInterval(), TimeUnit.MILLISECONDS);
+            HDFS_METRICS_SCHEDULED_EXECUTOR.scheduleAtFixedRate(HdfsCapacityMetrics::handleNodeHdfsMetrics, 0,
+                    KYLIN_CONFIG.getHdfsMetricsPeriodicCalculationInterval(), TimeUnit.MILLISECONDS);
         }
     }
 
@@ -104,8 +106,8 @@ public class HdfsCapacityMetrics {
     public static void writeHdfsMetrics() {
         prepareForWorkingDirCapacity.clear();
         // All WorkingDir capacities involved are calculated here
-        Set<String> allProjects = NProjectManager.getInstance(KYLIN_CONFIG).listAllProjects()
-                .stream().map(ProjectInstance::getName).collect(Collectors.toSet());
+        Set<String> allProjects = NProjectManager.getInstance(KYLIN_CONFIG).listAllProjects().stream()
+                .map(ProjectInstance::getName).collect(Collectors.toSet());
         try {
             for (String project : allProjects) {
                 // Should not initialize projectTotalStorageSize outside the loop, otherwise it may affect the next calculation
@@ -157,4 +159,4 @@ public class HdfsCapacityMetrics {
         }
         return -1L;
     }
-}
\ No newline at end of file
+}
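As the class-level comment in the hunk above notes, the registration is a plain single-thread ScheduledExecutorService guarded by the enable switch. The generic sketch below (not Kylin code; the two local constants stand in for the config getters isHdfsMetricsPeriodicCalculationEnabled() and getHdfsMetricsPeriodicCalculationInterval()) shows the same scheduleAtFixedRate wiring, which with the new default now starts out of the box.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class PeriodicMetricsSketch {
        public static void main(String[] args) throws InterruptedException {
            boolean enabled = true;           // stands in for the enable switch (now true by default)
            long intervalMs = 5 * 60 * 1000L; // stands in for the polling interval (default 5 min)

            ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1,
                    r -> new Thread(r, "HdfsMetricsChecker"));
            if (enabled) {
                scheduler.scheduleAtFixedRate(
                        () -> System.out.println("compute working-dir capacity here"),
                        0, intervalMs, TimeUnit.MILLISECONDS);
            }

            TimeUnit.SECONDS.sleep(1); // let the first run fire, then stop the sketch
            scheduler.shutdownNow();
        }
    }
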
diff --git a/src/core-metadata/src/test/java/org/apache/kylin/metrics/HdfsCapacityMetricsTest.java b/src/core-metadata/src/test/java/org/apache/kylin/metrics/HdfsCapacityMetricsTest.java
index b35736e3cc..0c4332707a 100644
--- a/src/core-metadata/src/test/java/org/apache/kylin/metrics/HdfsCapacityMetricsTest.java
+++ b/src/core-metadata/src/test/java/org/apache/kylin/metrics/HdfsCapacityMetricsTest.java
@@ -18,23 +18,23 @@
 
 package org.apache.kylin.metrics;
 
-import org.apache.kylin.metadata.epoch.EpochManager;
+import static org.awaitility.Awaitility.await;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.kylin.common.KylinConfig;
 import org.apache.kylin.common.util.HadoopUtil;
 import org.apache.kylin.common.util.NLocalFileMetadataTestCase;
+import org.apache.kylin.metadata.epoch.EpochManager;
 import org.awaitility.Duration;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-import static org.awaitility.Awaitility.await;
-
 public class HdfsCapacityMetricsTest extends NLocalFileMetadataTestCase {
 
     @Before
@@ -51,12 +51,13 @@ public class HdfsCapacityMetricsTest extends NLocalFileMetadataTestCase {
     public void testRegisterHdfsMetricsFailed() {
         HdfsCapacityMetrics.registerHdfsMetrics();
         // scheduledExecutor may like this
-        // java.util.concurrent.ScheduledThreadPoolExecutor@2d9caaeb[Running, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0]
+        // java.util.concurrent.ScheduledThreadPoolExecutor@5bf61e67[Running, pool size = 1, active threads = 1, queued tasks = 1, completed tasks = 0]
         String scheduledExecutor = HdfsCapacityMetrics.HDFS_METRICS_SCHEDULED_EXECUTOR.toString();
-        String activeThreadStr = "active threads = ";
-        int activeThreadIdx = scheduledExecutor.indexOf(activeThreadStr);
-        String thread = scheduledExecutor.substring(activeThreadIdx + activeThreadStr.length(), activeThreadIdx + activeThreadStr.length() + 1);
-        Assert.assertEquals(0, Integer.parseInt(thread));
+        String poolSizeStr = "pool size = ";
+        int activePoolSizeIdx = scheduledExecutor.indexOf(poolSizeStr);
+        String poolSize = scheduledExecutor.substring(activePoolSizeIdx + poolSizeStr.length(),
+                activePoolSizeIdx + poolSizeStr.length() + 1);
+        Assert.assertEquals(1, Integer.parseInt(poolSize));
     }
 
     @Test
@@ -68,7 +69,8 @@ public class HdfsCapacityMetricsTest extends NLocalFileMetadataTestCase {
         String scheduledExecutor = HdfsCapacityMetrics.HDFS_METRICS_SCHEDULED_EXECUTOR.toString();
         String activeThreadStr = "active threads = ";
         int activeThreadIdx = scheduledExecutor.indexOf(activeThreadStr);
-        String thread = scheduledExecutor.substring(activeThreadIdx + activeThreadStr.length(), activeThreadIdx + activeThreadStr.length() + 1);
+        String thread = scheduledExecutor.substring(activeThreadIdx + activeThreadStr.length(),
+                activeThreadIdx + activeThreadStr.length() + 1);
         Assert.assertEquals(1, Integer.parseInt(thread));
     }