Posted to common-commits@hadoop.apache.org by we...@apache.org on 2020/04/17 04:39:36 UTC

[hadoop] branch branch-3.1 updated: HDFS-15283. Cache pool MAXTTL is not persisted and restored on cluster restart. Contributed by Stephen O'Donnell.

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 0ea8f3a  HDFS-15283. Cache pool MAXTTL is not persisted and restored on cluster restart. Contributed by Stephen O'Donnell.
0ea8f3a is described below

commit 0ea8f3a19cd0b841dda6ba5c4e19887d9fc9cbb4
Author: Stephen O'Donnell <so...@apache.org>
AuthorDate: Thu Apr 16 20:16:41 2020 -0700

    HDFS-15283. Cache pool MAXTTL is not persisted and restored on cluster restart. Contributed by Stephen O'Donnell.
    
    Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
    (cherry picked from commit 3481895f8a9ea9f6e217a0ba158c48da89b3faf2)
    (cherry picked from commit aaad947c740c91c36139f3f0569ae78b53bca682)
    (cherry picked from commit 041c93a26cd0ff705bea9d6283951702cf2cf1c2)
---
 .../java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java | 8 ++++++++
 .../apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java   | 6 +++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index ab026f0..0816517 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -1042,6 +1042,10 @@ public class CacheManager {
       if (p.getLimit() != null)
         b.setLimit(p.getLimit());
 
+      if (p.getMaxRelativeExpiryMs() != null) {
+        b.setMaxRelativeExpiry(p.getMaxRelativeExpiryMs());
+      }
+
       pools.add(b.build());
     }
 
@@ -1107,6 +1111,10 @@ public class CacheManager {
       if (p.hasLimit())
         info.setLimit(p.getLimit());
 
+      if (p.hasMaxRelativeExpiry()) {
+        info.setMaxRelativeExpiryMs(p.getMaxRelativeExpiry());
+      }
+
       addCachePool(info);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index c58e090..613aedf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -626,10 +626,12 @@ public class TestCacheDirectives {
       String groupName = "partygroup";
       FsPermission mode = new FsPermission((short)0777);
       long limit = 747;
+      long maxExpiry = 1234567890;
       dfs.addCachePool(new CachePoolInfo(pool)
           .setGroupName(groupName)
           .setMode(mode)
-          .setLimit(limit));
+          .setLimit(limit)
+          .setMaxRelativeExpiryMs(maxExpiry));
       RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
       assertTrue("No cache pools found", pit.hasNext());
       CachePoolInfo info = pit.next().getInfo();
@@ -637,6 +639,7 @@ public class TestCacheDirectives {
       assertEquals(groupName, info.getGroupName());
       assertEquals(mode, info.getMode());
       assertEquals(limit, (long)info.getLimit());
+      assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs());
       assertFalse("Unexpected # of cache pools found", pit.hasNext());
     
       // Create some cache entries
@@ -697,6 +700,7 @@ public class TestCacheDirectives {
       assertEquals(groupName, info.getGroupName());
       assertEquals(mode, info.getMode());
       assertEquals(limit, (long)info.getLimit());
+      assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs());
       assertFalse("Unexpected # of cache pools found", pit.hasNext());
     
       dit = dfs.listCacheDirectives(null);

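For context, the two hunks above cover both halves of the round trip: when the fsimage is saved, the pool's maxRelativeExpiry is now written into the cache pool section, and when the image is loaded it is copied back into the CachePoolInfo passed to addCachePool(). Below is a minimal sketch (not part of this commit) of how the missing persistence could be observed outside the committed test; it uses the public MiniDFSCluster, DistributedFileSystem and CachePoolInfo APIs, and the class name, pool name and TTL value are chosen purely for illustration.

    // Sketch only: save the namespace, restart the NameNode, and check
    // whether the pool's MAXTTL survives the reload from the fsimage.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

    public class CachePoolMaxTtlRestartCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        try {
          cluster.waitActive();
          DistributedFileSystem dfs = cluster.getFileSystem();

          // Create a pool with an explicit max relative expiry (MAXTTL).
          dfs.addCachePool(new CachePoolInfo("ttlpool")
              .setMaxRelativeExpiryMs(1234567890L));

          // Persist the namespace, then restart the NameNode so the
          // pool is rebuilt from the saved fsimage.
          dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
          dfs.saveNamespace();
          dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
          cluster.restartNameNode();

          CachePoolInfo restored =
              cluster.getFileSystem().listCachePools().next().getInfo();
          // Without the patch this prints the default expiry rather than
          // the configured value, because MAXTTL was never written out.
          System.out.println("MAXTTL after restart: "
              + restored.getMaxRelativeExpiryMs());
        } finally {
          cluster.shutdown();
        }
      }
    }

The same round trip can be checked from the shell with hdfs cacheadmin -addPool (using its -maxTtl option) followed by hdfs cacheadmin -listPools after a NameNode restart.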

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org