You are viewing a plain-text version of this content; the canonical (hyperlinked) version is available in the original mailing-list archive.
Posted to hdfs-commits@hadoop.apache.org by br...@apache.org on 2013/09/30 08:13:27 UTC
svn commit: r1527453 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/
Author: brandonli
Date: Mon Sep 30 06:13:27 2013
New Revision: 1527453
URL: http://svn.apache.org/r1527453
Log:
HDFS-5256. Merging change r1527452 from trunk
Removed:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/LruCache.java
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java?rev=1527453&r1=1527452&r2=1527453&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java Mon Sep 30 06:13:27 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.ExecutionException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -27,59 +28,81 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.security.UserGroupInformation;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+
/**
* A cache saves DFSClient objects for different users
*/
-public class DFSClientCache {
- static final Log LOG = LogFactory.getLog(DFSClientCache.class);
- private final LruCache<String, DFSClient> lruTable;
+class DFSClientCache {
+ private static final Log LOG = LogFactory.getLog(DFSClientCache.class);
+ /**
+ * Cache that maps User id to corresponding DFSClient.
+ */
+ @VisibleForTesting
+ final LoadingCache<String, DFSClient> clientCache;
+
+ final static int DEFAULT_DFS_CLIENT_CACHE_SIZE = 256;
+
private final Configuration config;
- public DFSClientCache(Configuration config) {
- // By default, keep 256 DFSClient instance for 256 active users
- this(config, 256);
+ DFSClientCache(Configuration config) {
+ this(config, DEFAULT_DFS_CLIENT_CACHE_SIZE);
}
- public DFSClientCache(Configuration config, int size) {
- lruTable = new LruCache<String, DFSClient>(size);
+ DFSClientCache(Configuration config, int clientCache) {
this.config = config;
+ this.clientCache = CacheBuilder.newBuilder()
+ .maximumSize(clientCache)
+ .removalListener(clientRemovealListener())
+ .build(clientLoader());
+ }
+
+ private CacheLoader<String, DFSClient> clientLoader() {
+ return new CacheLoader<String, DFSClient>() {
+ @Override
+ public DFSClient load(String userName) throws Exception {
+ UserGroupInformation ugi = UserGroupInformation
+ .createRemoteUser(userName);
+
+ // Guava requires CacheLoader never returns null.
+ return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
+ public DFSClient run() throws IOException {
+ return new DFSClient(NameNode.getAddress(config), config);
+ }
+ });
+ }
+ };
+ }
+
+ private RemovalListener<String, DFSClient> clientRemovealListener() {
+ return new RemovalListener<String, DFSClient>() {
+ @Override
+ public void onRemoval(RemovalNotification<String, DFSClient> notification) {
+ DFSClient client = notification.getValue();
+ try {
+ client.close();
+ } catch (IOException e) {
+ LOG.warn(String.format(
+ "IOException when closing the DFSClient(%s), cause: %s", client,
+ e));
+ }
+ }
+ };
}
- public void put(String uname, DFSClient client) {
- lruTable.put(uname, client);
- }
-
- synchronized public DFSClient get(String uname) {
- DFSClient client = lruTable.get(uname);
- if (client != null) {
- return client;
- }
-
- // Not in table, create one.
+ DFSClient get(String userName) {
+ DFSClient client = null;
try {
- UserGroupInformation ugi = UserGroupInformation.createRemoteUser(uname);
- client = ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
- public DFSClient run() throws IOException {
- return new DFSClient(NameNode.getAddress(config), config);
- }
- });
- } catch (IOException e) {
- LOG.error("Create DFSClient failed for user:" + uname);
- e.printStackTrace();
-
- } catch (InterruptedException e) {
- e.printStackTrace();
+ client = clientCache.get(userName);
+ } catch (ExecutionException e) {
+ LOG.error("Failed to create DFSClient for user:" + userName + " Cause:"
+ + e);
}
- // Add new entry
- lruTable.put(uname, client);
return client;
}
-
- public int usedSize() {
- return lruTable.usedSize();
- }
-
- public boolean containsKey(String key) {
- return lruTable.containsKey(key);
- }
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java?rev=1527453&r1=1527452&r2=1527453&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java Mon Sep 30 06:13:27 2013
@@ -17,41 +17,44 @@
*/
package org.apache.hadoop.hdfs.nfs.nfs3;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
import org.junit.Test;
-import org.mockito.Mockito;
public class TestDFSClientCache {
@Test
- public void testLruTable() throws IOException {
- DFSClientCache cache = new DFSClientCache(new Configuration(), 3);
- DFSClient client = Mockito.mock(DFSClient.class);
- cache.put("a", client);
- assertTrue(cache.containsKey("a"));
-
- cache.put("b", client);
- cache.put("c", client);
- cache.put("d", client);
- assertTrue(cache.usedSize() == 3);
- assertFalse(cache.containsKey("a"));
-
- // Cache should have d,c,b in LRU order
- assertTrue(cache.containsKey("b"));
- // Do a lookup to make b the most recently used
- assertTrue(cache.get("b") != null);
-
- cache.put("e", client);
- assertTrue(cache.usedSize() == 3);
- // c should be replaced with e, and cache has e,b,d
- assertFalse(cache.containsKey("c"));
- assertTrue(cache.containsKey("e"));
- assertTrue(cache.containsKey("b"));
- assertTrue(cache.containsKey("d"));
+ public void testEviction() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
+
+ // Only one entry will be in the cache
+ final int MAX_CACHE_SIZE = 2;
+
+ DFSClientCache cache = new DFSClientCache(conf, MAX_CACHE_SIZE);
+
+ DFSClient c1 = cache.get("test1");
+ assertTrue(cache.get("test1").toString().contains("ugi=test1"));
+ assertEquals(c1, cache.get("test1"));
+ assertFalse(isDfsClientClose(c1));
+
+ cache.get("test2");
+ assertTrue(isDfsClientClose(c1));
+ assertEquals(MAX_CACHE_SIZE - 1, cache.clientCache.size());
+ }
+
+ private static boolean isDfsClientClose(DFSClient c) {
+ try {
+ c.exists("");
+ } catch (IOException e) {
+ return e.getMessage().equals("Filesystem closed");
+ }
+ return false;
}
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1527453&r1=1527452&r2=1527453&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Sep 30 06:13:27 2013
@@ -103,11 +103,14 @@ Release 2.1.2 - UNRELEASED
IMPROVEMENTS
- OPTIMIZATIONS
-
HDFS-5246. Make Hadoop nfs server port and mount daemon port
configurable. (Jinghui Wang via brandonli)
+ HDFS-5256. Use guava LoadingCache to implement DFSClientCache. (Haohui Mai
+ via brandonli)
+
+ OPTIMIZATIONS
+
BUG FIXES
HDFS-5139. Remove redundant -R option from setrep.