Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/11/08 02:44:26 UTC
svn commit: r1539898 [2/3] - in
/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/
hadoop-hdfs-nfs/src/test/java/org/apache/had...
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java Fri Nov 8 01:44:24 2013
@@ -19,14 +19,13 @@ package org.apache.hadoop.hdfs.server.na
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT;
-import java.io.Closeable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@@ -44,33 +43,22 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.IdNotFoundException;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
@@ -78,6 +66,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.Time;
@@ -111,7 +100,7 @@ public final class CacheManager {
/**
* Cache entries, sorted by ID.
*
- * listPathBasedCacheDescriptors relies on the ordering of elements in this map
+ * listPathBasedCacheDirectives relies on the ordering of elements in this map
* to track what has already been listed by the client.
*/
private final TreeMap<Long, PathBasedCacheEntry> entriesById =
@@ -143,7 +132,7 @@ public final class CacheManager {
/**
* Maximum number of cache pool directives to list in one operation.
*/
- private final int maxListCacheDescriptorsResponses;
+ private final int maxListCacheDirectivesNumResponses;
/**
* Interval between scans in milliseconds.
@@ -191,9 +180,9 @@ public final class CacheManager {
this.maxListCachePoolsResponses = conf.getInt(
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
- this.maxListCacheDescriptorsResponses = conf.getInt(
- DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES,
- DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT);
+ this.maxListCacheDirectivesNumResponses = conf.getInt(
+ DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
+ DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
scanIntervalMs = conf.getLong(
DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
@@ -266,132 +255,239 @@ public final class CacheManager {
return nextEntryId++;
}
- public PathBasedCacheDescriptor addDirective(
- PathBasedCacheDirective directive, FSPermissionChecker pc)
- throws IOException {
- assert namesystem.hasWriteLock();
- CachePool pool = cachePools.get(directive.getPool());
- if (pool == null) {
- LOG.info("addDirective " + directive + ": pool not found.");
- throw new InvalidPoolNameError(directive);
- }
- if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) {
- LOG.info("addDirective " + directive + ": write permission denied.");
- throw new PoolWritePermissionDeniedError(directive);
- }
- try {
- directive.validate();
- } catch (IOException ioe) {
- LOG.info("addDirective " + directive + ": validation failed: "
- + ioe.getClass().getName() + ": " + ioe.getMessage());
- throw ioe;
- }
-
- // Add a new entry with the next available ID.
- PathBasedCacheEntry entry;
- try {
- entry = new PathBasedCacheEntry(getNextEntryId(),
- directive.getPath().toUri().getPath(),
- directive.getReplication(), pool);
- } catch (IOException ioe) {
- throw new UnexpectedAddPathBasedCacheDirectiveException(directive);
- }
- LOG.info("addDirective " + directive + ": added cache directive "
- + directive);
-
- // Success!
- // First, add it to the various maps
+ private void addInternal(PathBasedCacheEntry entry) {
entriesById.put(entry.getEntryId(), entry);
- String path = directive.getPath().toUri().getPath();
+ String path = entry.getPath();
List<PathBasedCacheEntry> entryList = entriesByPath.get(path);
if (entryList == null) {
entryList = new ArrayList<PathBasedCacheEntry>(1);
entriesByPath.put(path, entryList);
}
entryList.add(entry);
+ }
+
+ public PathBasedCacheDirective addDirective(
+ PathBasedCacheDirective directive, FSPermissionChecker pc)
+ throws IOException {
+ assert namesystem.hasWriteLock();
+ PathBasedCacheEntry entry;
+ try {
+ if (directive.getPool() == null) {
+ throw new IdNotFoundException("addDirective: no pool was specified.");
+ }
+ if (directive.getPool().isEmpty()) {
+ throw new IdNotFoundException("addDirective: pool name was empty.");
+ }
+ CachePool pool = cachePools.get(directive.getPool());
+ if (pool == null) {
+ throw new IdNotFoundException("addDirective: no such pool as " +
+ directive.getPool());
+ }
+ if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) {
+ throw new AccessControlException("addDirective: write " +
+ "permission denied for pool " + directive.getPool());
+ }
+ if (directive.getPath() == null) {
+ throw new IOException("addDirective: no path was specified.");
+ }
+ String path = directive.getPath().toUri().getPath();
+ if (!DFSUtil.isValidName(path)) {
+ throw new IOException("addDirective: path '" + path + "' is invalid.");
+ }
+ short replication = directive.getReplication() == null ?
+ (short)1 : directive.getReplication();
+ if (replication <= 0) {
+ throw new IOException("addDirective: replication " + replication +
+ " is invalid.");
+ }
+ long id;
+ if (directive.getId() != null) {
+ // We are loading an entry from the edit log.
+ // Use the ID from the edit log.
+ id = directive.getId();
+ } else {
+ // Add a new entry with the next available ID.
+ id = getNextEntryId();
+ }
+ entry = new PathBasedCacheEntry(id, path, replication, pool);
+ addInternal(entry);
+ } catch (IOException e) {
+ LOG.warn("addDirective " + directive + ": failed.", e);
+ throw e;
+ }
+ LOG.info("addDirective " + directive + ": succeeded.");
if (monitor != null) {
monitor.kick();
}
- return entry.getDescriptor();
+ return entry.toDirective();
}
- public void removeDescriptor(long id, FSPermissionChecker pc)
- throws IOException {
+ public void modifyDirective(PathBasedCacheDirective directive,
+ FSPermissionChecker pc) throws IOException {
assert namesystem.hasWriteLock();
- // Check for invalid IDs.
- if (id <= 0) {
- LOG.info("removeDescriptor " + id + ": invalid non-positive " +
- "descriptor ID.");
- throw new InvalidIdException(id);
- }
- // Find the entry.
- PathBasedCacheEntry existing = entriesById.get(id);
- if (existing == null) {
- LOG.info("removeDescriptor " + id + ": entry not found.");
- throw new NoSuchIdException(id);
- }
- CachePool pool = cachePools.get(existing.getDescriptor().getPool());
- if (pool == null) {
- LOG.info("removeDescriptor " + id + ": pool not found for directive " +
- existing.getDescriptor());
- throw new UnexpectedRemovePathBasedCacheDescriptorException(id);
- }
- if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) {
- LOG.info("removeDescriptor " + id + ": write permission denied to " +
- "pool " + pool + " for entry " + existing);
- throw new RemovePermissionDeniedException(id);
+ String idString =
+ (directive.getId() == null) ?
+ "(null)" : directive.getId().toString();
+ try {
+ // Check for invalid IDs.
+ Long id = directive.getId();
+ if (id == null) {
+ throw new IdNotFoundException("modifyDirective: " +
+ "no ID to modify was supplied.");
+ }
+ if (id <= 0) {
+ throw new IdNotFoundException("modifyDirective " + id +
+ ": invalid non-positive directive ID.");
+ }
+ // Find the entry.
+ PathBasedCacheEntry prevEntry = entriesById.get(id);
+ if (prevEntry == null) {
+ throw new IdNotFoundException("modifyDirective " + id +
+ ": id not found.");
+ }
+ if ((pc != null) &&
+ (!pc.checkPermission(prevEntry.getPool(), FsAction.WRITE))) {
+ throw new AccessControlException("modifyDirective " + id +
+ ": permission denied for initial pool " + prevEntry.getPool());
+ }
+ String path = prevEntry.getPath();
+ if (directive.getPath() != null) {
+ path = directive.getPath().toUri().getPath();
+ if (!DFSUtil.isValidName(path)) {
+ throw new IOException("modifyDirective " + id + ": new path " +
+ path + " is not valid.");
+ }
+ }
+ short replication = (directive.getReplication() != null) ?
+ directive.getReplication() : prevEntry.getReplication();
+ if (replication <= 0) {
+ throw new IOException("modifyDirective: replication " + replication +
+ " is invalid.");
+ }
+ CachePool pool = prevEntry.getPool();
+ if (directive.getPool() != null) {
+ pool = cachePools.get(directive.getPool());
+ if (pool == null) {
+ throw new IdNotFoundException("modifyDirective " + id +
+ ": pool " + directive.getPool() + " not found.");
+ }
+ if (directive.getPool().isEmpty()) {
+ throw new IdNotFoundException("modifyDirective: pool name was " +
+ "empty.");
+ }
+ if ((pc != null) &&
+ (!pc.checkPermission(pool, FsAction.WRITE))) {
+ throw new AccessControlException("modifyDirective " + id +
+ ": permission denied for target pool " + pool);
+ }
+ }
+ removeInternal(prevEntry);
+ PathBasedCacheEntry newEntry =
+ new PathBasedCacheEntry(id, path, replication, pool);
+ addInternal(newEntry);
+ } catch (IOException e) {
+ LOG.warn("modifyDirective " + idString + ": failed.", e);
+ throw e;
}
-
+ LOG.info("modifyDirective " + idString + ": successfully applied " +
+ directive);
+ }
+
+ public void removeInternal(PathBasedCacheEntry existing)
+ throws IOException {
+ assert namesystem.hasWriteLock();
// Remove the corresponding entry in entriesByPath.
- String path = existing.getDescriptor().getPath().toUri().getPath();
+ String path = existing.getPath();
List<PathBasedCacheEntry> entries = entriesByPath.get(path);
if (entries == null || !entries.remove(existing)) {
- throw new UnexpectedRemovePathBasedCacheDescriptorException(id);
+ throw new IdNotFoundException("removeInternal: failed to locate entry " +
+ existing.getEntryId() + " by path " + existing.getPath());
}
if (entries.size() == 0) {
entriesByPath.remove(path);
}
- entriesById.remove(id);
+ entriesById.remove(existing.getEntryId());
+ }
+
+ public void removeDirective(long id, FSPermissionChecker pc)
+ throws IOException {
+ assert namesystem.hasWriteLock();
+ try {
+ // Check for invalid IDs.
+ if (id <= 0) {
+ throw new IdNotFoundException("removeDirective " + id + ": invalid " +
+ "non-positive directive ID.");
+ }
+ // Find the entry.
+ PathBasedCacheEntry existing = entriesById.get(id);
+ if (existing == null) {
+ throw new IdNotFoundException("removeDirective " + id +
+ ": id not found.");
+ }
+ if ((pc != null) &&
+ (!pc.checkPermission(existing.getPool(), FsAction.WRITE))) {
+ throw new AccessControlException("removeDirective " + id +
+ ": write permission denied on pool " +
+ existing.getPool().getPoolName());
+ }
+ removeInternal(existing);
+ } catch (IOException e) {
+ LOG.warn("removeDirective " + id + " failed.", e);
+ throw e;
+ }
if (monitor != null) {
monitor.kick();
}
- LOG.info("removeDescriptor successful for PathCacheEntry id " + id);
+ LOG.info("removeDirective " + id + ": succeeded.");
}
- public BatchedListEntries<PathBasedCacheDescriptor>
- listPathBasedCacheDescriptors(long prevId, String filterPool,
- String filterPath, FSPermissionChecker pc) throws IOException {
+ public BatchedListEntries<PathBasedCacheDirective>
+ listPathBasedCacheDirectives(long prevId,
+ PathBasedCacheDirective filter,
+ FSPermissionChecker pc) throws IOException {
assert namesystem.hasReadOrWriteLock();
final int NUM_PRE_ALLOCATED_ENTRIES = 16;
- if (filterPath != null) {
+ String filterPath = null;
+ if (filter.getId() != null) {
+ throw new IOException("we currently don't support filtering by ID");
+ }
+ if (filter.getPath() != null) {
+ filterPath = filter.getPath().toUri().getPath();
if (!DFSUtil.isValidName(filterPath)) {
- throw new IOException("invalid path name '" + filterPath + "'");
+ throw new IOException("listPathBasedCacheDirectives: invalid " +
+ "path name '" + filterPath + "'");
}
}
- ArrayList<PathBasedCacheDescriptor> replies =
- new ArrayList<PathBasedCacheDescriptor>(NUM_PRE_ALLOCATED_ENTRIES);
+ if (filter.getReplication() != null) {
+ throw new IOException("we currently don't support filtering " +
+ "by replication");
+ }
+ ArrayList<PathBasedCacheDirective> replies =
+ new ArrayList<PathBasedCacheDirective>(NUM_PRE_ALLOCATED_ENTRIES);
int numReplies = 0;
- SortedMap<Long, PathBasedCacheEntry> tailMap = entriesById.tailMap(prevId + 1);
+ SortedMap<Long, PathBasedCacheEntry> tailMap =
+ entriesById.tailMap(prevId + 1);
for (Entry<Long, PathBasedCacheEntry> cur : tailMap.entrySet()) {
- if (numReplies >= maxListCacheDescriptorsResponses) {
- return new BatchedListEntries<PathBasedCacheDescriptor>(replies, true);
+ if (numReplies >= maxListCacheDirectivesNumResponses) {
+ return new BatchedListEntries<PathBasedCacheDirective>(replies, true);
}
PathBasedCacheEntry curEntry = cur.getValue();
- PathBasedCacheDirective directive = cur.getValue().getDescriptor();
- if (filterPool != null &&
- !directive.getPool().equals(filterPool)) {
+ PathBasedCacheDirective directive = cur.getValue().toDirective();
+ if (filter.getPool() != null &&
+ !directive.getPool().equals(filter.getPool())) {
continue;
}
if (filterPath != null &&
!directive.getPath().toUri().getPath().equals(filterPath)) {
continue;
}
- if (pc.checkPermission(curEntry.getPool(), FsAction.READ)) {
- replies.add(cur.getValue().getDescriptor());
+ if ((pc == null) ||
+ (pc.checkPermission(curEntry.getPool(), FsAction.READ))) {
+ replies.add(cur.getValue().toDirective());
numReplies++;
}
}
- return new BatchedListEntries<PathBasedCacheDescriptor>(replies, false);
+ return new BatchedListEntries<PathBasedCacheDirective>(replies, false);
}
/**
@@ -553,7 +649,8 @@ public final class CacheManager {
blockManager.getDatanodeManager().getDatanode(datanodeID);
if (datanode == null || !datanode.isAlive) {
throw new IOException(
- "processCacheReport from dead or unregistered datanode: " + datanode);
+ "processCacheReport from dead or unregistered datanode: " +
+ datanode);
}
processCacheReportImpl(datanode, blockIds);
} finally {
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Fri Nov 8 01:44:24 2013
@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
@@ -63,7 +63,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDescriptorOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -956,18 +956,25 @@ public class FSEditLog implements LogsPu
void logAddPathBasedCacheDirective(PathBasedCacheDirective directive,
boolean toLogRpcIds) {
- AddPathBasedCacheDirectiveOp op = AddPathBasedCacheDirectiveOp.getInstance(
- cache.get())
- .setPath(directive.getPath().toUri().getPath())
- .setReplication(directive.getReplication())
- .setPool(directive.getPool());
+ AddPathBasedCacheDirectiveOp op =
+ AddPathBasedCacheDirectiveOp.getInstance(cache.get())
+ .setDirective(directive);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
- void logRemovePathBasedCacheDescriptor(Long id, boolean toLogRpcIds) {
- RemovePathBasedCacheDescriptorOp op =
- RemovePathBasedCacheDescriptorOp.getInstance(cache.get()).setId(id);
+ void logModifyPathBasedCacheDirective(
+ PathBasedCacheDirective directive, boolean toLogRpcIds) {
+ ModifyPathBasedCacheDirectiveOp op =
+ ModifyPathBasedCacheDirectiveOp.getInstance(
+ cache.get()).setDirective(directive);
+ logRpcIds(op, toLogRpcIds);
+ logEdit(op);
+ }
+
+ void logRemovePathBasedCacheDirective(Long id, boolean toLogRpcIds) {
+ RemovePathBasedCacheDirectiveOp op =
+ RemovePathBasedCacheDirectiveOp.getInstance(cache.get()).setId(id);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Nov 8 01:44:24 2013
@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
@@ -58,9 +57,10 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDescriptorOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@@ -642,23 +642,28 @@ public class FSEditLogLoader {
}
case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: {
AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op;
- PathBasedCacheDirective d = new PathBasedCacheDirective.Builder().
- setPath(new Path(addOp.path)).
- setReplication(addOp.replication).
- setPool(addOp.pool).
- build();
- PathBasedCacheDescriptor descriptor =
- fsNamesys.getCacheManager().addDirective(d, null);
- if (toAddRetryCache) {
- fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId,
- descriptor);
+ PathBasedCacheDirective result = fsNamesys.
+ getCacheManager().addDirective(addOp.directive, null);
+ if (toAddRetryCache) {
+ Long id = result.getId();
+ fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id);
+ }
+ break;
+ }
+ case OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE: {
+ ModifyPathBasedCacheDirectiveOp modifyOp =
+ (ModifyPathBasedCacheDirectiveOp) op;
+ fsNamesys.getCacheManager().modifyDirective(
+ modifyOp.directive, null);
+ if (toAddRetryCache) {
+ fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
}
break;
}
- case OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR: {
- RemovePathBasedCacheDescriptorOp removeOp =
- (RemovePathBasedCacheDescriptorOp) op;
- fsNamesys.getCacheManager().removeDescriptor(removeOp.id, null);
+ case OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE: {
+ RemovePathBasedCacheDirectiveOp removeOp =
+ (RemovePathBasedCacheDirectiveOp) op;
+ fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
if (toAddRetryCache) {
fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Fri Nov 8 01:44:24 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_PATH_BASED_CACHE_DIRECTIVE;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOCATE_BLOCK_ID;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOW_SNAPSHOT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN;
@@ -37,7 +38,7 @@ import static org.apache.hadoop.hdfs.ser
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_POOL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REASSIGN_LEASE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_POOL;
-import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR;
+import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT;
@@ -74,6 +75,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -84,6 +86,7 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@@ -164,8 +167,10 @@ public abstract class FSEditLogOp {
inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE,
new AddPathBasedCacheDirectiveOp());
- inst.put(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR,
- new RemovePathBasedCacheDescriptorOp());
+ inst.put(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE,
+ new ModifyPathBasedCacheDirectiveOp());
+ inst.put(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE,
+ new RemovePathBasedCacheDirectiveOp());
inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp());
@@ -2866,9 +2871,7 @@ public abstract class FSEditLogOp {
* {@link ClientProtocol#addPathBasedCacheDirective}
*/
static class AddPathBasedCacheDirectiveOp extends FSEditLogOp {
- String path;
- short replication;
- String pool;
+ PathBasedCacheDirective directive;
public AddPathBasedCacheDirectiveOp() {
super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
@@ -2879,61 +2882,199 @@ public abstract class FSEditLogOp {
.get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
}
- public AddPathBasedCacheDirectiveOp setPath(String path) {
- this.path = path;
+ public AddPathBasedCacheDirectiveOp setDirective(
+ PathBasedCacheDirective directive) {
+ this.directive = directive;
+ assert(directive.getId() != null);
+ assert(directive.getPath() != null);
+ assert(directive.getReplication() != null);
+ assert(directive.getPool() != null);
return this;
}
- public AddPathBasedCacheDirectiveOp setReplication(short replication) {
- this.replication = replication;
- return this;
+ @Override
+ void readFields(DataInputStream in, int logVersion) throws IOException {
+ long id = FSImageSerialization.readLong(in);
+ String path = FSImageSerialization.readString(in);
+ short replication = FSImageSerialization.readShort(in);
+ String pool = FSImageSerialization.readString(in);
+ directive = new PathBasedCacheDirective.Builder().
+ setId(id).
+ setPath(new Path(path)).
+ setReplication(replication).
+ setPool(pool).
+ build();
+ readRpcIds(in, logVersion);
}
- public AddPathBasedCacheDirectiveOp setPool(String pool) {
- this.pool = pool;
+ @Override
+ public void writeFields(DataOutputStream out) throws IOException {
+ FSImageSerialization.writeLong(directive.getId(), out);
+ FSImageSerialization.writeString(directive.getPath().toUri().getPath(), out);
+ FSImageSerialization.writeShort(directive.getReplication(), out);
+ FSImageSerialization.writeString(directive.getPool(), out);
+ writeRpcIds(rpcClientId, rpcCallId, out);
+ }
+
+ @Override
+ protected void toXml(ContentHandler contentHandler) throws SAXException {
+ XMLUtils.addSaxString(contentHandler, "ID",
+ directive.getId().toString());
+ XMLUtils.addSaxString(contentHandler, "PATH",
+ directive.getPath().toUri().getPath());
+ XMLUtils.addSaxString(contentHandler, "REPLICATION",
+ Short.toString(directive.getReplication()));
+ XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool());
+ appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
+ }
+
+ @Override
+ void fromXml(Stanza st) throws InvalidXmlException {
+ directive = new PathBasedCacheDirective.Builder().
+ setId(Long.parseLong(st.getValue("ID"))).
+ setPath(new Path(st.getValue("PATH"))).
+ setReplication(Short.parseShort(st.getValue("REPLICATION"))).
+ setPool(st.getValue("POOL")).
+ build();
+ readRpcIdsFromXml(st);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("AddPathBasedCacheDirective [");
+ builder.append("id=" + directive.getId() + ",");
+ builder.append("path=" + directive.getPath().toUri().getPath() + ",");
+ builder.append("replication=" + directive.getReplication() + ",");
+ builder.append("pool=" + directive.getPool());
+ appendRpcIdsToString(builder, rpcClientId, rpcCallId);
+ builder.append("]");
+ return builder.toString();
+ }
+ }
+
+ /**
+ * {@literal @AtMostOnce} for
+ * {@link ClientProtocol#modifyPathBasedCacheDirective}
+ */
+ static class ModifyPathBasedCacheDirectiveOp extends FSEditLogOp {
+ PathBasedCacheDirective directive;
+
+ public ModifyPathBasedCacheDirectiveOp() {
+ super(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
+ }
+
+ static ModifyPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
+ return (ModifyPathBasedCacheDirectiveOp) cache
+ .get(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
+ }
+
+ public ModifyPathBasedCacheDirectiveOp setDirective(
+ PathBasedCacheDirective directive) {
+ this.directive = directive;
+ assert(directive.getId() != null);
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
- this.path = FSImageSerialization.readString(in);
- this.replication = FSImageSerialization.readShort(in);
- this.pool = FSImageSerialization.readString(in);
+ PathBasedCacheDirective.Builder builder =
+ new PathBasedCacheDirective.Builder();
+ builder.setId(FSImageSerialization.readLong(in));
+ byte flags = in.readByte();
+ if ((flags & 0x1) != 0) {
+ builder.setPath(new Path(FSImageSerialization.readString(in)));
+ }
+ if ((flags & 0x2) != 0) {
+ builder.setReplication(FSImageSerialization.readShort(in));
+ }
+ if ((flags & 0x4) != 0) {
+ builder.setPool(FSImageSerialization.readString(in));
+ }
+ if ((flags & ~0x7) != 0) {
+ throw new IOException("unknown flags set in " +
+ "ModifyPathBasedCacheDirectiveOp: " + flags);
+ }
+ this.directive = builder.build();
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
- FSImageSerialization.writeString(path, out);
- FSImageSerialization.writeShort(replication, out);
- FSImageSerialization.writeString(pool, out);
+ FSImageSerialization.writeLong(directive.getId(), out);
+ byte flags = (byte)(
+ ((directive.getPath() != null) ? 0x1 : 0) |
+ ((directive.getReplication() != null) ? 0x2 : 0) |
+ ((directive.getPool() != null) ? 0x4 : 0)
+ );
+ out.writeByte(flags);
+ if (directive.getPath() != null) {
+ FSImageSerialization.writeString(
+ directive.getPath().toUri().getPath(), out);
+ }
+ if (directive.getReplication() != null) {
+ FSImageSerialization.writeShort(directive.getReplication(), out);
+ }
+ if (directive.getPool() != null) {
+ FSImageSerialization.writeString(directive.getPool(), out);
+ }
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
- XMLUtils.addSaxString(contentHandler, "PATH", path);
- XMLUtils.addSaxString(contentHandler, "REPLICATION",
- Short.toString(replication));
- XMLUtils.addSaxString(contentHandler, "POOL", pool);
+ XMLUtils.addSaxString(contentHandler, "ID",
+ Long.toString(directive.getId()));
+ if (directive.getPath() != null) {
+ XMLUtils.addSaxString(contentHandler, "PATH",
+ directive.getPath().toUri().getPath());
+ }
+ if (directive.getReplication() != null) {
+ XMLUtils.addSaxString(contentHandler, "REPLICATION",
+ Short.toString(directive.getReplication()));
+ }
+ if (directive.getPool() != null) {
+ XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool());
+ }
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
- path = st.getValue("PATH");
- replication = Short.parseShort(st.getValue("REPLICATION"));
- pool = st.getValue("POOL");
+ PathBasedCacheDirective.Builder builder =
+ new PathBasedCacheDirective.Builder();
+ builder.setId(Long.parseLong(st.getValue("ID")));
+ String path = st.getValueOrNull("PATH");
+ if (path != null) {
+ builder.setPath(new Path(path));
+ }
+ String replicationString = st.getValueOrNull("REPLICATION");
+ if (replicationString != null) {
+ builder.setReplication(Short.parseShort(replicationString));
+ }
+ String pool = st.getValueOrNull("POOL");
+ if (pool != null) {
+ builder.setPool(pool);
+ }
+ this.directive = builder.build();
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("AddPathBasedCacheDirective [");
- builder.append("path=" + path + ",");
- builder.append("replication=" + replication + ",");
- builder.append("pool=" + pool);
+ builder.append("ModifyPathBasedCacheDirectiveOp[");
+ builder.append("id=").append(directive.getId());
+ if (directive.getPath() != null) {
+ builder.append(",").append("path=").append(directive.getPath());
+ }
+ if (directive.getReplication() != null) {
+ builder.append(",").append("replication=").
+ append(directive.getReplication());
+ }
+ if (directive.getPool() != null) {
+ builder.append(",").append("pool=").append(directive.getPool());
+ }
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
@@ -2942,21 +3083,21 @@ public abstract class FSEditLogOp {
/**
* {@literal @AtMostOnce} for
- * {@link ClientProtocol#removePathBasedCacheDescriptor}
+ * {@link ClientProtocol#removePathBasedCacheDirective}
*/
- static class RemovePathBasedCacheDescriptorOp extends FSEditLogOp {
+ static class RemovePathBasedCacheDirectiveOp extends FSEditLogOp {
long id;
- public RemovePathBasedCacheDescriptorOp() {
- super(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR);
+ public RemovePathBasedCacheDirectiveOp() {
+ super(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
}
- static RemovePathBasedCacheDescriptorOp getInstance(OpInstanceCache cache) {
- return (RemovePathBasedCacheDescriptorOp) cache
- .get(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR);
+ static RemovePathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
+ return (RemovePathBasedCacheDirectiveOp) cache
+ .get(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
}
- public RemovePathBasedCacheDescriptorOp setId(long id) {
+ public RemovePathBasedCacheDirectiveOp setId(long id) {
this.id = id;
return this;
}
@@ -2988,7 +3129,7 @@ public abstract class FSEditLogOp {
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("RemovePathBasedCacheDescriptor [");
+ builder.append("RemovePathBasedCacheDirective [");
builder.append("id=" + Long.toString(id));
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java Fri Nov 8 01:44:24 2013
@@ -64,11 +64,12 @@ public enum FSEditLogOpCodes {
OP_DISALLOW_SNAPSHOT ((byte) 30),
OP_SET_GENSTAMP_V2 ((byte) 31),
OP_ALLOCATE_BLOCK_ID ((byte) 32),
- OP_ADD_PATH_BASED_CACHE_DIRECTIVE ((byte) 33),
- OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR ((byte) 34),
- OP_ADD_CACHE_POOL ((byte) 35),
- OP_MODIFY_CACHE_POOL ((byte) 36),
- OP_REMOVE_CACHE_POOL ((byte) 37);
+ OP_ADD_PATH_BASED_CACHE_DIRECTIVE ((byte) 33),
+ OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE ((byte) 34),
+ OP_ADD_CACHE_POOL ((byte) 35),
+ OP_MODIFY_CACHE_POOL ((byte) 36),
+ OP_REMOVE_CACHE_POOL ((byte) 37),
+ OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE ((byte) 38);
private byte opCode;
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Nov 8 01:44:24 2013
@@ -153,7 +153,6 @@ import org.apache.hadoop.hdfs.StorageTyp
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -7000,7 +6999,7 @@ public class FSNamesystem implements Nam
}
}
- PathBasedCacheDescriptor addPathBasedCacheDirective(
+ long addPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = isPermissionEnabled ?
@@ -7008,20 +7007,26 @@ public class FSNamesystem implements Nam
CacheEntryWithPayload cacheEntry =
RetryCache.waitForCompletion(retryCache, null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
- return (PathBasedCacheDescriptor) cacheEntry.getPayload();
+ return (Long) cacheEntry.getPayload();
}
boolean success = false;
- PathBasedCacheDescriptor result = null;
writeLock();
+ Long result = null;
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot add PathBasedCache directive", safeMode);
}
- result = cacheManager.addDirective(directive, pc);
- getEditLog().logAddPathBasedCacheDirective(directive,
+ if (directive.getId() != null) {
+ throw new IOException("addDirective: you cannot specify an ID " +
+ "for this operation.");
+ }
+ PathBasedCacheDirective effectiveDirective =
+ cacheManager.addDirective(directive, pc);
+ getEditLog().logAddPathBasedCacheDirective(effectiveDirective,
cacheEntry != null);
+ result = effectiveDirective.getId();
success = true;
} finally {
writeUnlock();
@@ -7036,7 +7041,40 @@ public class FSNamesystem implements Nam
return result;
}
- void removePathBasedCacheDescriptor(Long id) throws IOException {
+ void modifyPathBasedCacheDirective(
+ PathBasedCacheDirective directive) throws IOException {
+ checkOperation(OperationCategory.WRITE);
+ final FSPermissionChecker pc = isPermissionEnabled ?
+ getPermissionChecker() : null;
+ boolean success = false;
+ CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+ if (cacheEntry != null && cacheEntry.isSuccess()) {
+ return;
+ }
+ writeLock();
+ try {
+ checkOperation(OperationCategory.WRITE);
+ if (isInSafeMode()) {
+ throw new SafeModeException(
+ "Cannot add PathBasedCache directive", safeMode);
+ }
+ cacheManager.modifyDirective(directive, pc);
+ getEditLog().logModifyPathBasedCacheDirective(directive,
+ cacheEntry != null);
+ success = true;
+ } finally {
+ writeUnlock();
+ if (success) {
+ getEditLog().logSync();
+ }
+ if (isAuditEnabled() && isExternalInvocation()) {
+ logAuditEvent(success, "addPathBasedCacheDirective", null, null, null);
+ }
+ RetryCache.setState(cacheEntry, success);
+ }
+ }
+
+ void removePathBasedCacheDirective(Long id) throws IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
@@ -7052,13 +7090,13 @@ public class FSNamesystem implements Nam
throw new SafeModeException(
"Cannot remove PathBasedCache directives", safeMode);
}
- cacheManager.removeDescriptor(id, pc);
- getEditLog().logRemovePathBasedCacheDescriptor(id, cacheEntry != null);
+ cacheManager.removeDirective(id, pc);
+ getEditLog().logRemovePathBasedCacheDirective(id, cacheEntry != null);
success = true;
} finally {
writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
- logAuditEvent(success, "removePathBasedCacheDescriptor", null, null,
+ logAuditEvent(success, "removePathBasedCacheDirective", null, null,
null);
}
RetryCache.setState(cacheEntry, success);
@@ -7066,23 +7104,23 @@ public class FSNamesystem implements Nam
getEditLog().logSync();
}
- BatchedListEntries<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(
- long startId, String pool, String path) throws IOException {
+ BatchedListEntries<PathBasedCacheDirective> listPathBasedCacheDirectives(
+ long startId, PathBasedCacheDirective filter) throws IOException {
checkOperation(OperationCategory.READ);
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
- BatchedListEntries<PathBasedCacheDescriptor> results;
+ BatchedListEntries<PathBasedCacheDirective> results;
readLock();
boolean success = false;
try {
checkOperation(OperationCategory.READ);
results =
- cacheManager.listPathBasedCacheDescriptors(startId, pool, path, pc);
+ cacheManager.listPathBasedCacheDirectives(startId, filter, pc);
success = true;
} finally {
readUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
- logAuditEvent(success, "listPathBasedCacheDescriptors", null, null,
+ logAuditEvent(success, "listPathBasedCacheDirectives", null, null,
null);
}
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Fri Nov 8 01:44:24 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
+import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -31,6 +32,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -132,6 +134,39 @@ public class INodeFileUnderConstruction
}
@Override
+ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
+ final BlocksMapUpdateInfo collectedBlocks,
+ final List<INode> removedINodes, final boolean countDiffChange)
+ throws QuotaExceededException {
+ if (snapshot == null && prior != null) {
+ cleanZeroSizeBlock(collectedBlocks);
+ return Counts.newInstance();
+ } else {
+ return super.cleanSubtree(snapshot, prior, collectedBlocks,
+ removedINodes, countDiffChange);
+ }
+ }
+
+ /**
+ * When deleting a file in the current fs directory, and the file is contained
+ * in a snapshot, we should delete the last block if it's under construction
+ * and its size is 0.
+ */
+ private void cleanZeroSizeBlock(final BlocksMapUpdateInfo collectedBlocks) {
+ final BlockInfo[] blocks = getBlocks();
+ if (blocks != null && blocks.length > 0
+ && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
+ BlockInfoUnderConstruction lastUC =
+ (BlockInfoUnderConstruction) blocks[blocks.length - 1];
+ if (lastUC.getNumBytes() == 0) {
+ // this is a 0-sized block. do not need check its UC state here
+ collectedBlocks.addDeleteBlock(lastUC);
+ removeLastBlock(lastUC);
+ }
+ }
+ }
+
+ @Override
public INodeFileUnderConstruction recordModification(final Snapshot latest,
final INodeMap inodeMap) throws QuotaExceededException {
if (isInLatestSnapshot(latest)) {
@@ -158,7 +193,7 @@ public class INodeFileUnderConstruction
* Remove a block from the block list. This block should be
* the last one on the list.
*/
- boolean removeLastBlock(Block oldblock) throws IOException {
+ boolean removeLastBlock(Block oldblock) {
final BlockInfo[] blocks = getBlocks();
if (blocks == null || blocks.length == 0) {
return false;
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Fri Nov 8 01:44:24 2013
@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.HDFSPolicy
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1237,46 +1236,52 @@ class NameNodeRpcServer implements Namen
}
@Override
- public PathBasedCacheDescriptor addPathBasedCacheDirective(
+ public long addPathBasedCacheDirective(
PathBasedCacheDirective path) throws IOException {
return namesystem.addPathBasedCacheDirective(path);
}
@Override
- public void removePathBasedCacheDescriptor(Long id) throws IOException {
- namesystem.removePathBasedCacheDescriptor(id);
+ public void modifyPathBasedCacheDirective(
+ PathBasedCacheDirective directive) throws IOException {
+ namesystem.modifyPathBasedCacheDirective(directive);
}
- private class ServerSidePathBasedCacheEntriesIterator
- extends BatchedRemoteIterator<Long, PathBasedCacheDescriptor> {
-
- private final String pool;
+ @Override
+ public void removePathBasedCacheDirective(long id) throws IOException {
+ namesystem.removePathBasedCacheDirective(id);
+ }
- private final String path;
+ private class ServerSidePathBasedCacheEntriesIterator
+ extends BatchedRemoteIterator<Long, PathBasedCacheDirective> {
- public ServerSidePathBasedCacheEntriesIterator(Long firstKey, String pool,
- String path) {
+ private final PathBasedCacheDirective filter;
+
+ public ServerSidePathBasedCacheEntriesIterator(Long firstKey,
+ PathBasedCacheDirective filter) {
super(firstKey);
- this.pool = pool;
- this.path = path;
+ this.filter = filter;
}
@Override
- public BatchedEntries<PathBasedCacheDescriptor> makeRequest(
+ public BatchedEntries<PathBasedCacheDirective> makeRequest(
Long nextKey) throws IOException {
- return namesystem.listPathBasedCacheDescriptors(nextKey, pool, path);
+ return namesystem.listPathBasedCacheDirectives(nextKey, filter);
}
@Override
- public Long elementToPrevKey(PathBasedCacheDescriptor entry) {
- return entry.getEntryId();
+ public Long elementToPrevKey(PathBasedCacheDirective entry) {
+ return entry.getId();
}
}
@Override
- public RemoteIterator<PathBasedCacheDescriptor> listPathBasedCacheDescriptors(long prevId,
- String pool, String path) throws IOException {
- return new ServerSidePathBasedCacheEntriesIterator(prevId, pool, path);
+ public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(long prevId,
+ PathBasedCacheDirective filter) throws IOException {
+ if (filter == null) {
+ filter = new PathBasedCacheDirective.Builder().build();
+ }
+ return new ServerSidePathBasedCacheEntriesIterator(prevId, filter);
}
@Override
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java Fri Nov 8 01:44:24 2013
@@ -30,11 +30,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
-import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.tools.TableListing.Justification;
import org.apache.hadoop.ipc.RemoteException;
@@ -180,11 +177,9 @@ public class CacheAdmin extends Configur
setPool(poolName).
build();
try {
- PathBasedCacheDescriptor descriptor =
- dfs.addPathBasedCacheDirective(directive);
- System.out.println("Added PathBasedCache entry "
- + descriptor.getEntryId());
- } catch (AddPathBasedCacheDirectiveException e) {
+ long id = dfs.addPathBasedCacheDirective(directive);
+ System.out.println("Added PathBasedCache entry " + id);
+ } catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
}
@@ -243,9 +238,9 @@ public class CacheAdmin extends Configur
}
DistributedFileSystem dfs = getDFS(conf);
try {
- dfs.getClient().removePathBasedCacheDescriptor(id);
+ dfs.getClient().removePathBasedCacheDirective(id);
System.out.println("Removed PathBasedCache directive " + id);
- } catch (RemovePathBasedCacheDescriptorException e) {
+ } catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
}
@@ -261,13 +256,13 @@ public class CacheAdmin extends Configur
@Override
public String getShortUsage() {
- return "[" + getName() + " <path>]\n";
+ return "[" + getName() + " -path <path>]\n";
}
@Override
public String getLongUsage() {
TableListing listing = getOptionDescriptionListing();
- listing.addRow("<path>", "The path of the cache directives to remove. " +
+ listing.addRow("-path <path>", "The path of the cache directives to remove. " +
"You must have write permission on the pool of the directive in order " +
"to remove it. To see a list of cache directives, use the " +
"-listDirectives command.");
@@ -289,16 +284,18 @@ public class CacheAdmin extends Configur
return 1;
}
DistributedFileSystem dfs = getDFS(conf);
- RemoteIterator<PathBasedCacheDescriptor> iter =
- dfs.listPathBasedCacheDescriptors(null, new Path(path));
+ RemoteIterator<PathBasedCacheDirective> iter =
+ dfs.listPathBasedCacheDirectives(
+ new PathBasedCacheDirective.Builder().
+ setPath(new Path(path)).build());
int exitCode = 0;
while (iter.hasNext()) {
- PathBasedCacheDescriptor entry = iter.next();
+ PathBasedCacheDirective directive = iter.next();
try {
- dfs.removePathBasedCacheDescriptor(entry);
+ dfs.removePathBasedCacheDirective(directive.getId());
System.out.println("Removed PathBasedCache directive " +
- entry.getEntryId());
- } catch (RemovePathBasedCacheDescriptorException e) {
+ directive.getId());
+ } catch (IOException e) {
System.err.println(prettifyException(e));
exitCode = 2;
}
@@ -338,8 +335,16 @@ public class CacheAdmin extends Configur
@Override
public int run(Configuration conf, List<String> args) throws IOException {
+ PathBasedCacheDirective.Builder builder =
+ new PathBasedCacheDirective.Builder();
String pathFilter = StringUtils.popOptionWithArgument("-path", args);
+ if (pathFilter != null) {
+ builder.setPath(new Path(pathFilter));
+ }
String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
+ if (poolFilter != null) {
+ builder.setPool(poolFilter);
+ }
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
@@ -350,15 +355,14 @@ public class CacheAdmin extends Configur
addField("PATH", Justification.LEFT).
build();
DistributedFileSystem dfs = getDFS(conf);
- RemoteIterator<PathBasedCacheDescriptor> iter =
- dfs.listPathBasedCacheDescriptors(poolFilter, pathFilter != null ?
- new Path(pathFilter) : null);
+ RemoteIterator<PathBasedCacheDirective> iter =
+ dfs.listPathBasedCacheDirectives(builder.build());
int numEntries = 0;
while (iter.hasNext()) {
- PathBasedCacheDescriptor entry = iter.next();
+ PathBasedCacheDirective directive = iter.next();
String row[] = new String[] {
- "" + entry.getEntryId(), entry.getPool(),
- entry.getPath().toUri().getPath(),
+ "" + directive.getId(), directive.getPool(),
+ directive.getPath().toUri().getPath(),
};
tableListing.addRow(row);
numEntries++;
@@ -720,7 +724,6 @@ public class CacheAdmin extends Configur
return 0;
}
String commandName = args.get(0);
- commandName = commandName.replaceAll("^[-]*", "");
Command command = determineCommand(commandName);
if (command == null) {
System.err.print("Sorry, I don't know the command '" +
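The CacheAdmin changes above drop the PathBasedCacheDescriptor wrapper in favour of plain numeric directive ids and plain IOExceptions. The following is a minimal sketch of the resulting client-side pattern against DistributedFileSystem; the path "/warm/data" and pool "pool1" are illustrative only, and it assumes fs.defaultFS points at an HDFS namenode.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

public class CacheDirectiveIdExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf);
    // Directives are now identified by a numeric id rather than a descriptor.
    long id = dfs.addPathBasedCacheDirective(
        new PathBasedCacheDirective.Builder().
            setPath(new Path("/warm/data")).   // illustrative path
            setPool("pool1").                  // illustrative pool name
            build());
    System.out.println("Added PathBasedCache entry " + id);
    // Removal takes the same id; failures now surface as plain IOExceptions.
    dfs.removePathBasedCacheDirective(id);
  }
}
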
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java Fri Nov 8 01:44:24 2013
@@ -252,9 +252,24 @@ public class XMLUtils {
* @return the entry
*/
public String getValue(String name) throws InvalidXmlException {
- if (!subtrees.containsKey(name)) {
+ String ret = getValueOrNull(name);
+ if (ret == null) {
throw new InvalidXmlException("no entry found for " + name);
}
+ return ret;
+ }
+
+ /**
+ * Pull a string entry from a stanza, or null.
+ *
+ * @param name entry to look for
+ *
+ * @return the entry, or null if it was not found.
+ */
+ public String getValueOrNull(String name) throws InvalidXmlException {
+ if (!subtrees.containsKey(name)) {
+ return null;
+ }
LinkedList <Stanza> l = subtrees.get(name);
if (l.size() != 1) {
throw new InvalidXmlException("More than one value found for " + name);
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Fri Nov 8 01:44:24 2013
@@ -364,42 +364,46 @@ message IsFileClosedResponseProto {
required bool result = 1;
}
-message PathBasedCacheDirectiveProto {
- required string path = 1;
- required uint32 replication = 2;
- required string pool = 3;
+message PathBasedCacheDirectiveInfoProto {
+ optional int64 id = 1;
+ optional string path = 2;
+ optional uint32 replication = 3;
+ optional string pool = 4;
}
message AddPathBasedCacheDirectiveRequestProto {
- required PathBasedCacheDirectiveProto directive = 1;
+ required PathBasedCacheDirectiveInfoProto info = 1;
}
message AddPathBasedCacheDirectiveResponseProto {
- required int64 descriptorId = 1;
+ required int64 id = 1;
+}
+
+message ModifyPathBasedCacheDirectiveRequestProto {
+ required PathBasedCacheDirectiveInfoProto info = 1;
}
-message RemovePathBasedCacheDescriptorRequestProto {
- required int64 descriptorId = 1;
+message ModifyPathBasedCacheDirectiveResponseProto {
}
-message RemovePathBasedCacheDescriptorResponseProto {
+message RemovePathBasedCacheDirectiveRequestProto {
+ required int64 id = 1;
+}
+
+message RemovePathBasedCacheDirectiveResponseProto {
}
-message ListPathBasedCacheDescriptorsRequestProto {
+message ListPathBasedCacheDirectivesRequestProto {
required int64 prevId = 1;
- optional string pool = 2;
- optional string path = 3;
+ required PathBasedCacheDirectiveInfoProto filter = 2;
}
-message ListPathBasedCacheDescriptorsElementProto {
- required int64 id = 1;
- required string pool = 2;
- required uint32 replication = 3;
- required string path = 4;
+message ListPathBasedCacheDirectivesElementProto {
+ required PathBasedCacheDirectiveInfoProto info = 1;
}
-message ListPathBasedCacheDescriptorsResponseProto {
- repeated ListPathBasedCacheDescriptorsElementProto elements = 1;
+message ListPathBasedCacheDirectivesResponseProto {
+ repeated ListPathBasedCacheDirectivesElementProto elements = 1;
required bool hasMore = 2;
}
@@ -633,10 +637,12 @@ service ClientNamenodeProtocol {
rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
rpc addPathBasedCacheDirective(AddPathBasedCacheDirectiveRequestProto)
returns (AddPathBasedCacheDirectiveResponseProto);
- rpc removePathBasedCacheDescriptor(RemovePathBasedCacheDescriptorRequestProto)
- returns (RemovePathBasedCacheDescriptorResponseProto);
- rpc listPathBasedCacheDescriptors(ListPathBasedCacheDescriptorsRequestProto)
- returns (ListPathBasedCacheDescriptorsResponseProto);
+ rpc modifyPathBasedCacheDirective(ModifyPathBasedCacheDirectiveRequestProto)
+ returns (ModifyPathBasedCacheDirectiveResponseProto);
+ rpc removePathBasedCacheDirective(RemovePathBasedCacheDirectiveRequestProto)
+ returns (RemovePathBasedCacheDirectiveResponseProto);
+ rpc listPathBasedCacheDirectives(ListPathBasedCacheDirectivesRequestProto)
+ returns (ListPathBasedCacheDirectivesResponseProto);
rpc addCachePool(AddCachePoolRequestProto)
returns(AddCachePoolResponseProto);
rpc modifyCachePool(ModifyCachePoolRequestProto)
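On the wire, add/modify/list now share the optional-field PathBasedCacheDirectiveInfoProto, and listing takes a filter instead of separate pool/path arguments. A sketch of how that filter-based listing looks through DistributedFileSystem is below; the pool name used as the filter is illustrative, and it assumes fs.defaultFS points at an HDFS namenode.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

public class ListDirectivesExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // An empty builder acts as "no filter"; setting pool and/or path narrows
    // the listing, mirroring the optional fields of
    // PathBasedCacheDirectiveInfoProto.
    PathBasedCacheDirective filter =
        new PathBasedCacheDirective.Builder().
            setPool("pool1").   // illustrative pool name
            build();
    RemoteIterator<PathBasedCacheDirective> iter =
        dfs.listPathBasedCacheDirectives(filter);
    while (iter.hasNext()) {
      PathBasedCacheDirective d = iter.next();
      System.out.println(d.getId() + " " + d.getPool() + " "
          + d.getPath().toUri().getPath());
    }
  }
}
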
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Fri Nov 8 01:44:24 2013
@@ -1487,10 +1487,10 @@
</property>
<property>
- <name>dfs.namenode.list.cache.descriptors.num.responses</name>
+ <name>dfs.namenode.list.cache.directives.num.responses</name>
<value>100</value>
<description>
- This value controls the number of cache descriptors that the NameNode will
+ This value controls the number of cache directives that the NameNode will
send over the wire in response to a listDirectives RPC.
</description>
</property>
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Nov 8 01:44:24 2013
@@ -1033,19 +1033,26 @@ public class DFSTestUtil {
locatedBlocks = DFSClientAdapter.callGetBlockLocations(
cluster.getNameNodeRpc(nnIndex), filePath, 0L, bytes.length);
} while (locatedBlocks.isUnderConstruction());
- // OP_ADD_CACHE_POOL 35
+ // OP_ADD_CACHE_POOL
filesystem.addCachePool(new CachePoolInfo("pool1"));
- // OP_MODIFY_CACHE_POOL 36
+ // OP_MODIFY_CACHE_POOL
filesystem.modifyCachePool(new CachePoolInfo("pool1").setWeight(99));
- // OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
- PathBasedCacheDescriptor pbcd = filesystem.addPathBasedCacheDirective(
+ // OP_ADD_PATH_BASED_CACHE_DIRECTIVE
+ long id = filesystem.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
setPath(new Path("/path")).
+ setReplication((short)1).
setPool("pool1").
build());
- // OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR 34
- filesystem.removePathBasedCacheDescriptor(pbcd);
- // OP_REMOVE_CACHE_POOL 37
+ // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
+ filesystem.modifyPathBasedCacheDirective(
+ new PathBasedCacheDirective.Builder().
+ setId(id).
+ setReplication((short)2).
+ build());
+ // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
+ filesystem.removePathBasedCacheDirective(id);
+ // OP_REMOVE_CACHE_POOL
filesystem.removeCachePool("pool1");
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Fri Nov 8 01:44:24 2013
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -242,14 +241,20 @@ public class OfflineEditsViewerHelper {
.setMode(new FsPermission((short)0700))
.setWeight(1989));
// OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
- PathBasedCacheDescriptor descriptor =
- dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
+ long id = dfs.addPathBasedCacheDirective(
+ new PathBasedCacheDirective.Builder().
setPath(new Path("/bar")).
setReplication((short)1).
setPool(pool).
build());
- // OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR 34
- dfs.removePathBasedCacheDescriptor(descriptor);
+ // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE 38
+ dfs.modifyPathBasedCacheDirective(
+ new PathBasedCacheDirective.Builder().
+ setId(id).
+ setPath(new Path("/bar2")).
+ build());
+ // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE 34
+ dfs.removePathBasedCacheDirective(id);
// OP_REMOVE_CACHE_POOL 37
dfs.removeCachePool(pool);
// sync to disk, otherwise we parse partial edits
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1539898&r1=1539897&r2=1539898&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Fri Nov 8 01:44:24 2013
@@ -414,7 +414,7 @@ public class TestNamenodeRetryCache {
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
- assertEquals(19, cacheSet.size());
+ assertEquals(20, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -433,7 +433,7 @@ public class TestNamenodeRetryCache {
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
- assertEquals(19, cacheSet.size());
+ assertEquals(20, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();