Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2014/07/12 04:24:55 UTC
svn commit: r1609878 [6/9] - in
/hadoop/common/branches/YARN-1051/hadoop-hdfs-project: hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/
hadoop-hdfs...
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java Sat Jul 12 02:24:40 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.server.blo
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
/**
@@ -73,7 +74,41 @@ public class FileWithSnapshotFeature imp
}
return max;
}
-
+
+ boolean changedBetweenSnapshots(INodeFile file, Snapshot from, Snapshot to) {
+ int[] diffIndexPair = diffs.changedBetweenSnapshots(from, to);
+ if (diffIndexPair == null) {
+ return false;
+ }
+ int earlierDiffIndex = diffIndexPair[0];
+ int laterDiffIndex = diffIndexPair[1];
+
+ final List<FileDiff> diffList = diffs.asList();
+ final long earlierLength = diffList.get(earlierDiffIndex).getFileSize();
+ final long laterLength = laterDiffIndex == diffList.size() ? file
+ .computeFileSize(true, false) : diffList.get(laterDiffIndex)
+ .getFileSize();
+ if (earlierLength != laterLength) { // file length has been changed
+ return true;
+ }
+
+ INodeFileAttributes earlierAttr = null; // check the metadata
+ for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
+ FileDiff diff = diffList.get(i);
+ if (diff.snapshotINode != null) {
+ earlierAttr = diff.snapshotINode;
+ break;
+ }
+ }
+ if (earlierAttr == null) { // no meta-change at all, return false
+ return false;
+ }
+ INodeFileAttributes laterAttr = diffs.getSnapshotINode(
+ Math.max(Snapshot.getSnapshotId(from), Snapshot.getSnapshotId(to)),
+ file);
+ return !earlierAttr.metadataEquals(laterAttr);
+ }
+
public String getDetailedString() {
return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
}
@@ -159,7 +194,7 @@ public class FileWithSnapshotFeature imp
// resize the array.
final BlockInfo[] newBlocks;
if (n == 0) {
- newBlocks = null;
+ newBlocks = BlockInfo.EMPTY_ARRAY;
} else {
newBlocks = new BlockInfo[n];
System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
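Two changes land in FileWithSnapshotFeature above: changedBetweenSnapshots() reports whether a file's length or metadata differs between two snapshots, and the block-truncation path now returns BlockInfo.EMPTY_ARRAY instead of null when no blocks remain. A minimal sketch of the second change's convention (the helper below is hypothetical, distilled from the hunk):

    // Hypothetical helper distilled from the hunk above: returning the
    // shared empty array instead of null lets callers iterate the result
    // without a null check.
    static BlockInfo[] truncate(BlockInfo[] oldBlocks, int n) {
      if (n == 0) {
        return BlockInfo.EMPTY_ARRAY;   // was: null
      }
      final BlockInfo[] newBlocks = new BlockInfo[n];
      System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
      return newBlocks;
    }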
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Sat Jul 12 02:24:40 2014
@@ -184,15 +184,14 @@ public class Snapshot implements Compara
/** The root directory of the snapshot. */
private final Root root;
- Snapshot(int id, String name, INodeDirectorySnapshottable dir) {
+ Snapshot(int id, String name, INodeDirectory dir) {
this(id, dir, dir);
this.root.setLocalName(DFSUtil.string2Bytes(name));
}
- Snapshot(int id, INodeDirectory dir, INodeDirectorySnapshottable parent) {
+ Snapshot(int id, INodeDirectory dir, INodeDirectory parent) {
this.id = id;
this.root = new Root(dir);
-
this.root.setParent(parent);
}
Copied: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java (from r1605891, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java?p2=hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java&r1=1605891&r2=1609878&rev=1609878&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java Sat Jul 12 02:24:40 2014
@@ -99,7 +99,7 @@ class SnapshotDiffInfo {
}
/** The root directory of the snapshots */
- private final INodeDirectorySnapshottable snapshotRoot;
+ private final INodeDirectory snapshotRoot;
/** The starting point of the difference */
private final Snapshot from;
/** The end point of the difference */
@@ -122,8 +122,8 @@ class SnapshotDiffInfo {
private final Map<Long, RenameEntry> renameMap =
new HashMap<Long, RenameEntry>();
- SnapshotDiffInfo(INodeDirectorySnapshottable snapshotRoot, Snapshot start,
- Snapshot end) {
+ SnapshotDiffInfo(INodeDirectory snapshotRoot, Snapshot start, Snapshot end) {
+ Preconditions.checkArgument(snapshotRoot.isSnapshottable());
this.snapshotRoot = snapshotRoot;
this.from = start;
this.to = end;
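This hunk shows the refactoring theme that runs through the whole commit: the INodeDirectorySnapshottable subclass gives way to a DirectorySnapshottableFeature attached to a plain INodeDirectory, so constructors and methods accept the general type and assert snapshottability at runtime. A sketch of the guard pattern (the helper is hypothetical; the calls are the ones visible in the diff):

    // The guard used at each converted call site: take the general type,
    // then fail fast if the snapshottable feature is missing.
    void requireSnapshottable(INodeDirectory dir) {
      // isSnapshottable() is true once addSnapshottableFeature() has run
      Preconditions.checkArgument(dir.isSnapshottable(),
          "not a snapshottable directory: %s", dir);
    }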
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Sat Jul 12 02:24:40 2014
@@ -41,6 +41,8 @@ import org.apache.hadoop.hdfs.tools.snap
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import com.google.common.base.Preconditions;
+
/**
* A helper class defining static methods for reading/writing snapshot related
* information from/to FSImage.
@@ -52,17 +54,19 @@ public class SnapshotFSImageFormat {
* @param out The {@link DataOutput} to write.
* @throws IOException
*/
- public static void saveSnapshots(INodeDirectorySnapshottable current,
- DataOutput out) throws IOException {
+ public static void saveSnapshots(INodeDirectory current, DataOutput out)
+ throws IOException {
+ DirectorySnapshottableFeature sf = current.getDirectorySnapshottableFeature();
+ Preconditions.checkArgument(sf != null);
// list of snapshots in snapshotsByNames
- ReadOnlyList<Snapshot> snapshots = current.getSnapshotsByNames();
+ ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
out.writeInt(snapshots.size());
for (Snapshot s : snapshots) {
// write the snapshot id
out.writeInt(s.getId());
}
// snapshot quota
- out.writeInt(current.getSnapshotQuota());
+ out.writeInt(sf.getSnapshotQuota());
}
/**
@@ -216,19 +220,22 @@ public class SnapshotFSImageFormat {
* @param loader
* The loader
*/
- public static void loadSnapshotList(
- INodeDirectorySnapshottable snapshottableParent, int numSnapshots,
- DataInput in, FSImageFormat.Loader loader) throws IOException {
+ public static void loadSnapshotList(INodeDirectory snapshottableParent,
+ int numSnapshots, DataInput in, FSImageFormat.Loader loader)
+ throws IOException {
+ DirectorySnapshottableFeature sf = snapshottableParent
+ .getDirectorySnapshottableFeature();
+ Preconditions.checkArgument(sf != null);
for (int i = 0; i < numSnapshots; i++) {
// read snapshots
final Snapshot s = loader.getSnapshot(in);
s.getRoot().setParent(snapshottableParent);
- snapshottableParent.addSnapshot(s);
+ sf.addSnapshot(s);
}
int snapshotQuota = in.readInt();
snapshottableParent.setSnapshotQuota(snapshotQuota);
}
-
+
/**
* Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
* directory.
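Both methods above now fetch the DirectorySnapshottableFeature up front and fail fast when it is missing. Read together, the two hunks define a small serialization contract, restated here as a commented sketch:

    // Contract between saveSnapshots() and loadSnapshotList(), condensed:
    //
    //   int count              -- number of snapshots under this directory
    //   int id   (count times) -- ids only; the snapshot roots are saved
    //                             elsewhere in the FSImage, so the loader
    //                             resolves each id back to its Snapshot
    //   int quota              -- per-directory snapshot quota
    //
    // The count is written by saveSnapshots() but consumed by the caller
    // of loadSnapshotList(), which passes it in as numSnapshots.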
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Sat Jul 12 02:24:40 2014
@@ -30,9 +30,11 @@ import java.util.concurrent.atomic.Atomi
import javax.management.ObjectName;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -40,9 +42,10 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
import org.apache.hadoop.metrics2.util.MBeans;
+import com.google.common.base.Preconditions;
+
/**
* Manage snapshottable directories and their snapshots.
*
@@ -65,8 +68,8 @@ public class SnapshotManager implements
private int snapshotCounter = 0;
/** All snapshottable directories in the namesystem. */
- private final Map<Long, INodeDirectorySnapshottable> snapshottables
- = new HashMap<Long, INodeDirectorySnapshottable>();
+ private final Map<Long, INodeDirectory> snapshottables =
+ new HashMap<Long, INodeDirectory>();
public SnapshotManager(final FSDirectory fsdir) {
this.fsdir = fsdir;
@@ -83,7 +86,7 @@ public class SnapshotManager implements
return;
}
- for(INodeDirectorySnapshottable s : snapshottables.values()) {
+ for(INodeDirectory s : snapshottables.values()) {
if (s.isAncestorDirectory(dir)) {
throw new SnapshotException(
"Nested snapshottable directories not allowed: path=" + path
@@ -111,33 +114,30 @@ public class SnapshotManager implements
checkNestedSnapshottable(d, path);
}
-
- final INodeDirectorySnapshottable s;
if (d.isSnapshottable()) {
//The directory is already a snapshottable directory.
- s = (INodeDirectorySnapshottable)d;
- s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
+ d.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
} else {
- s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(),
- fsdir.getINodeMap());
+ d.addSnapshottableFeature();
}
- addSnapshottable(s);
+ addSnapshottable(d);
}
/** Add the given snapshottable directory to {@link #snapshottables}. */
- public void addSnapshottable(INodeDirectorySnapshottable dir) {
+ public void addSnapshottable(INodeDirectory dir) {
+ Preconditions.checkArgument(dir.isSnapshottable());
snapshottables.put(dir.getId(), dir);
}
/** Remove the given snapshottable directory from {@link #snapshottables}. */
- private void removeSnapshottable(INodeDirectorySnapshottable s) {
+ private void removeSnapshottable(INodeDirectory s) {
snapshottables.remove(s.getId());
}
/** Remove snapshottable directories from {@link #snapshottables} */
- public void removeSnapshottable(List<INodeDirectorySnapshottable> toRemove) {
+ public void removeSnapshottable(List<INodeDirectory> toRemove) {
if (toRemove != null) {
- for (INodeDirectorySnapshottable s : toRemove) {
+ for (INodeDirectory s : toRemove) {
removeSnapshottable(s);
}
}
@@ -151,22 +151,22 @@ public class SnapshotManager implements
public void resetSnapshottable(final String path) throws IOException {
final INodesInPath iip = fsdir.getINodesInPath4Write(path);
final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
- if (!d.isSnapshottable()) {
+ DirectorySnapshottableFeature sf = d.getDirectorySnapshottableFeature();
+ if (sf == null) {
// the directory is already non-snapshottable
return;
}
- final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
- if (s.getNumSnapshots() > 0) {
+ if (sf.getNumSnapshots() > 0) {
throw new SnapshotException("The directory " + path + " has snapshot(s). "
+ "Please redo the operation after removing all the snapshots.");
}
- if (s == fsdir.getRoot()) {
- s.setSnapshotQuota(0);
+ if (d == fsdir.getRoot()) {
+ d.setSnapshotQuota(0);
} else {
- s.replaceSelf(iip.getLatestSnapshotId(), fsdir.getINodeMap());
+ d.removeSnapshottableFeature();
}
- removeSnapshottable(s);
+ removeSnapshottable(d);
}
/**
@@ -179,10 +179,15 @@ public class SnapshotManager implements
* Throw IOException when the given path does not lead to an
* existing snapshottable directory.
*/
- public INodeDirectorySnapshottable getSnapshottableRoot(final String path
- ) throws IOException {
- final INodesInPath i = fsdir.getINodesInPath4Write(path);
- return INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
+ public INodeDirectory getSnapshottableRoot(final String path)
+ throws IOException {
+ final INodeDirectory dir = INodeDirectory.valueOf(fsdir
+ .getINodesInPath4Write(path).getLastINode(), path);
+ if (!dir.isSnapshottable()) {
+ throw new SnapshotException(
+ "Directory is not a snapshottable directory: " + path);
+ }
+ return dir;
}
/**
@@ -201,7 +206,7 @@ public class SnapshotManager implements
*/
public String createSnapshot(final String path, String snapshotName
) throws IOException {
- INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
+ INodeDirectory srcRoot = getSnapshottableRoot(path);
if (snapshotCounter == getMaxSnapshotID()) {
// We have reached the maximum allowable snapshot ID and since we don't
@@ -234,7 +239,7 @@ public class SnapshotManager implements
// parse the path, and check if the path is a snapshot path
// the INodeDirectorySnapshottable#valueOf method will throw Exception
// if the path is not for a snapshottable directory
- INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
+ INodeDirectory srcRoot = getSnapshottableRoot(path);
srcRoot.removeSnapshot(snapshotName, collectedBlocks, removedINodes);
numSnapshots.getAndDecrement();
}
@@ -257,8 +262,7 @@ public class SnapshotManager implements
final String newSnapshotName) throws IOException {
// Find the source root directory path where the snapshot was taken.
// All the check for path has been included in the valueOf method.
- final INodeDirectorySnapshottable srcRoot
- = INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
+ final INodeDirectory srcRoot = getSnapshottableRoot(path);
// Note that renameSnapshot and createSnapshot are synchronized externally
// through FSNamesystem's write lock
srcRoot.renameSnapshot(path, oldSnapshotName, newSnapshotName);
@@ -284,9 +288,9 @@ public class SnapshotManager implements
snapshotCounter = counter;
}
- INodeDirectorySnapshottable[] getSnapshottableDirs() {
+ INodeDirectory[] getSnapshottableDirs() {
return snapshottables.values().toArray(
- new INodeDirectorySnapshottable[snapshottables.size()]);
+ new INodeDirectory[snapshottables.size()]);
}
/**
@@ -298,8 +302,9 @@ public class SnapshotManager implements
out.writeInt(numSnapshots.get());
// write all snapshots.
- for(INodeDirectorySnapshottable snapshottableDir : snapshottables.values()) {
- for(Snapshot s : snapshottableDir.getSnapshotsByNames()) {
+ for(INodeDirectory snapshottableDir : snapshottables.values()) {
+ for (Snapshot s : snapshottableDir.getDirectorySnapshottableFeature()
+ .getSnapshotList()) {
s.write(out);
}
}
@@ -338,16 +343,16 @@ public class SnapshotManager implements
List<SnapshottableDirectoryStatus> statusList =
new ArrayList<SnapshottableDirectoryStatus>();
- for (INodeDirectorySnapshottable dir : snapshottables.values()) {
+ for (INodeDirectory dir : snapshottables.values()) {
if (userName == null || userName.equals(dir.getUserName())) {
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
dir.getModificationTime(), dir.getAccessTime(),
dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
dir.getLocalNameBytes(), dir.getId(),
dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
- dir.getNumSnapshots(),
- dir.getSnapshotQuota(), dir.getParent() == null ?
- DFSUtil.EMPTY_BYTES :
+ dir.getDirectorySnapshottableFeature().getNumSnapshots(),
+ dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
+ dir.getParent() == null ? DFSUtil.EMPTY_BYTES :
DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
statusList.add(status);
}
@@ -361,21 +366,22 @@ public class SnapshotManager implements
* Compute the difference between two snapshots of a directory, or between a
* snapshot of the directory and its current tree.
*/
- public SnapshotDiffInfo diff(final String path, final String from,
+ public SnapshotDiffReport diff(final String path, final String from,
final String to) throws IOException {
+ // Find the source root directory path where the snapshots were taken.
+ // All the check for path has been included in the valueOf method.
+ final INodeDirectory snapshotRoot = getSnapshottableRoot(path);
+
if ((from == null || from.isEmpty())
&& (to == null || to.isEmpty())) {
// both fromSnapshot and toSnapshot indicate the current tree
- return null;
+ return new SnapshotDiffReport(path, from, to,
+ Collections.<DiffReportEntry> emptyList());
}
-
- // Find the source root directory path where the snapshots were taken.
- // All the check for path has been included in the valueOf method.
- INodesInPath inodesInPath = fsdir.getINodesInPath4Write(path.toString());
- final INodeDirectorySnapshottable snapshotRoot = INodeDirectorySnapshottable
- .valueOf(inodesInPath.getLastINode(), path);
-
- return snapshotRoot.computeDiff(from, to);
+ final SnapshotDiffInfo diffs = snapshotRoot
+ .getDirectorySnapshottableFeature().computeDiff(snapshotRoot, from, to);
+ return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
+ path, from, to, Collections.<DiffReportEntry> emptyList());
}
public void clearSnapshottableDirs() {
@@ -408,7 +414,7 @@ public class SnapshotManager implements
getSnapshottableDirectories() {
List<SnapshottableDirectoryStatus.Bean> beans =
new ArrayList<SnapshottableDirectoryStatus.Bean>();
- for (INodeDirectorySnapshottable d : getSnapshottableDirs()) {
+ for (INodeDirectory d : getSnapshottableDirs()) {
beans.add(toBean(d));
}
return beans.toArray(new SnapshottableDirectoryStatus.Bean[beans.size()]);
@@ -417,20 +423,19 @@ public class SnapshotManager implements
@Override // SnapshotStatsMXBean
public SnapshotInfo.Bean[] getSnapshots() {
List<SnapshotInfo.Bean> beans = new ArrayList<SnapshotInfo.Bean>();
- for (INodeDirectorySnapshottable d : getSnapshottableDirs()) {
- for (Snapshot s : d.getSnapshotList()) {
+ for (INodeDirectory d : getSnapshottableDirs()) {
+ for (Snapshot s : d.getDirectorySnapshottableFeature().getSnapshotList()) {
beans.add(toBean(s));
}
}
return beans.toArray(new SnapshotInfo.Bean[beans.size()]);
}
- public static SnapshottableDirectoryStatus.Bean toBean(
- INodeDirectorySnapshottable d) {
+ public static SnapshottableDirectoryStatus.Bean toBean(INodeDirectory d) {
return new SnapshottableDirectoryStatus.Bean(
d.getFullPathName(),
- d.getNumSnapshots(),
- d.getSnapshotQuota(),
+ d.getDirectorySnapshottableFeature().getNumSnapshots(),
+ d.getDirectorySnapshottableFeature().getSnapshotQuota(),
d.getModificationTime(),
Short.valueOf(Integer.toOctalString(
d.getFsPermissionShort())),
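The SnapshotManager hunks complete the composition-over-inheritance move: a directory becomes snapshottable by gaining a feature and reverts by dropping it, with no inode replacement (and thus no INodeMap surgery) in either direction. Condensed from the code above, using only calls that appear in the diff:

    // Enable: reuse the existing inode, just attach the feature.
    if (d.isSnapshottable()) {
      // already snapshottable; only reset the quota
      d.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
    } else {
      d.addSnapshottableFeature();
    }
    addSnapshottable(d);   // register in the snapshottables map, keyed by inode id

    // Disable: only legal once all snapshots are gone.
    // (The fsdir root is special-cased: it keeps the feature, quota set to 0.)
    DirectorySnapshottableFeature sf = d.getDirectorySnapshottableFeature();
    if (sf != null && sf.getNumSnapshots() == 0) {
      d.removeSnapshottableFeature();
      removeSnapshottable(d);
    }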
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java Sat Jul 12 02:24:40 2014
@@ -82,6 +82,11 @@ public class DatanodeStorage {
}
@Override
+ public String toString() {
+ return "DatanodeStorage["+ storageID + "," + storageType + "," + state +"]";
+ }
+
+ @Override
public boolean equals(Object other){
if (other == this) {
return true;
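A small quality-of-life change: DatanodeStorage now prints its id, type, and state, which makes heartbeat and block-report logs easier to read. Assuming the class's three-argument constructor, with invented values:

    DatanodeStorage storage = new DatanodeStorage(
        "DS-1234", DatanodeStorage.State.NORMAL, StorageType.DISK);
    // prints: DatanodeStorage[DS-1234,DISK,NORMAL]
    System.out.println(storage);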
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java Sat Jul 12 02:24:40 2014
@@ -111,7 +111,7 @@ public class ShortCircuitCache implement
Long evictionTimeNs = Long.valueOf(0);
while (true) {
Entry<Long, ShortCircuitReplica> entry =
- evictableMmapped.ceilingEntry(evictionTimeNs);
+ evictable.ceilingEntry(evictionTimeNs);
if (entry == null) break;
evictionTimeNs = entry.getKey();
long evictionTimeMs =
@@ -384,10 +384,6 @@ public class ShortCircuitCache implement
this.shmManager = shmManager;
}
- public long getMmapRetryTimeoutMs() {
- return mmapRetryTimeoutMs;
- }
-
public long getStaleThresholdMs() {
return staleThresholdMs;
}
@@ -847,7 +843,7 @@ public class ShortCircuitCache implement
} else if (replica.mmapData instanceof Long) {
long lastAttemptTimeMs = (Long)replica.mmapData;
long delta = Time.monotonicNow() - lastAttemptTimeMs;
- if (delta < staleThresholdMs) {
+ if (delta < mmapRetryTimeoutMs) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": can't create client mmap for " +
replica + " because we failed to " +
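Three small but real fixes in ShortCircuitCache: the eviction loop for non-mmapped replicas now walks evictable rather than evictableMmapped, the unused getMmapRetryTimeoutMs() accessor is dropped, and the mmap retry check compares against the correct constant. A sketch of the corrected retry logic, assuming the fields named in the hunk:

    // After a failed mmap attempt, mmapData holds the attempt time as a
    // Long; re-trying is gated on mmapRetryTimeoutMs, not on the replica
    // staleness threshold the old code used by mistake.
    if (replica.mmapData instanceof Long) {
      long lastAttemptTimeMs = (Long) replica.mmapData;
      long delta = Time.monotonicNow() - lastAttemptTimeMs;
      if (delta < mmapRetryTimeoutMs) {
        return null;   // still inside the retry-backoff window
      }
    }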
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Sat Jul 12 02:24:40 2014
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -292,7 +293,7 @@ public class DFSAdmin extends FsShell {
static final String USAGE = "-"+NAME+" [<query|prepare|finalize>]";
static final String DESCRIPTION = USAGE + ":\n"
+ " query: query the current rolling upgrade status.\n"
- + " prepare: prepare a new rolling upgrade."
+ + " prepare: prepare a new rolling upgrade.\n"
+ " finalize: finalize the current rolling upgrade.";
/** Check if a command is the rollingUpgrade command
@@ -498,25 +499,60 @@ public class DFSAdmin extends FsShell {
printUsage("-safemode");
return;
}
+
DistributedFileSystem dfs = getDFS();
- boolean inSafeMode = dfs.setSafeMode(action);
+ Configuration dfsConf = dfs.getConf();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
- //
- // If we are waiting for safemode to exit, then poll and
- // sleep till we are out of safemode.
- //
- if (waitExitSafe) {
- while (inSafeMode) {
- try {
- Thread.sleep(5000);
- } catch (java.lang.InterruptedException e) {
- throw new IOException("Wait Interrupted");
+ if (isHaEnabled) {
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<ClientProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(
+ dfsConf, nsId, ClientProtocol.class);
+ for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+ ClientProtocol haNn = proxy.getProxy();
+ boolean inSafeMode = haNn.setSafeMode(action, false);
+ if (waitExitSafe) {
+ inSafeMode = waitExitSafeMode(haNn, inSafeMode);
}
- inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
+ System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF")
+ + " in " + proxy.getAddress());
}
+ } else {
+ boolean inSafeMode = dfs.setSafeMode(action);
+ if (waitExitSafe) {
+ inSafeMode = waitExitSafeMode(dfs, inSafeMode);
+ }
+ System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
}
- System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
+ }
+
+ private boolean waitExitSafeMode(DistributedFileSystem dfs, boolean inSafeMode)
+ throws IOException {
+ while (inSafeMode) {
+ try {
+ Thread.sleep(5000);
+ } catch (java.lang.InterruptedException e) {
+ throw new IOException("Wait Interrupted");
+ }
+ inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+ }
+ return inSafeMode;
+ }
+
+ private boolean waitExitSafeMode(ClientProtocol nn, boolean inSafeMode)
+ throws IOException {
+ while (inSafeMode) {
+ try {
+ Thread.sleep(5000);
+ } catch (java.lang.InterruptedException e) {
+ throw new IOException("Wait Interrupted");
+ }
+ inSafeMode = nn.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+ }
+ return inSafeMode;
}
/**
@@ -561,7 +597,24 @@ public class DFSAdmin extends FsShell {
int exitCode = -1;
DistributedFileSystem dfs = getDFS();
- dfs.saveNamespace();
+ Configuration dfsConf = dfs.getConf();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+ if (isHaEnabled) {
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<ClientProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+ nsId, ClientProtocol.class);
+ for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+ proxy.getProxy().saveNamespace();
+ System.out.println("Save namespace successful for " +
+ proxy.getAddress());
+ }
+ } else {
+ dfs.saveNamespace();
+ System.out.println("Save namespace successful");
+ }
exitCode = 0;
return exitCode;
@@ -583,15 +636,30 @@ public class DFSAdmin extends FsShell {
*/
public int restoreFailedStorage(String arg) throws IOException {
int exitCode = -1;
-
if(!arg.equals("check") && !arg.equals("true") && !arg.equals("false")) {
System.err.println("restoreFailedStorage valid args are true|false|check");
return exitCode;
}
DistributedFileSystem dfs = getDFS();
- Boolean res = dfs.restoreFailedStorage(arg);
- System.out.println("restoreFailedStorage is set to " + res);
+ Configuration dfsConf = dfs.getConf();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+ if (isHaEnabled) {
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<ClientProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+ nsId, ClientProtocol.class);
+ for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+ Boolean res = proxy.getProxy().restoreFailedStorage(arg);
+ System.out.println("restoreFailedStorage is set to " + res + " for "
+ + proxy.getAddress());
+ }
+ } else {
+ Boolean res = dfs.restoreFailedStorage(arg);
+ System.out.println("restoreFailedStorage is set to " + res);
+ }
exitCode = 0;
return exitCode;
@@ -607,7 +675,24 @@ public class DFSAdmin extends FsShell {
int exitCode = -1;
DistributedFileSystem dfs = getDFS();
- dfs.refreshNodes();
+ Configuration dfsConf = dfs.getConf();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+ if (isHaEnabled) {
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<ClientProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+ nsId, ClientProtocol.class);
+ for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
+ proxy.getProxy().refreshNodes();
+ System.out.println("Refresh nodes successful for " +
+ proxy.getAddress());
+ }
+ } else {
+ dfs.refreshNodes();
+ System.out.println("Refresh nodes successful");
+ }
exitCode = 0;
return exitCode;
@@ -641,7 +726,24 @@ public class DFSAdmin extends FsShell {
}
DistributedFileSystem dfs = (DistributedFileSystem) fs;
- dfs.setBalancerBandwidth(bandwidth);
+ Configuration dfsConf = dfs.getConf();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+ if (isHaEnabled) {
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<ClientProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+ nsId, ClientProtocol.class);
+ for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+ proxy.getProxy().setBalancerBandwidth(bandwidth);
+ System.out.println("Balancer bandwidth is set to " + bandwidth +
+ " for " + proxy.getAddress());
+ }
+ } else {
+ dfs.setBalancerBandwidth(bandwidth);
+ System.out.println("Balancer bandwidth is set to " + bandwidth);
+ }
exitCode = 0;
return exitCode;
@@ -937,11 +1039,18 @@ public class DFSAdmin extends FsShell {
if (!HAUtil.isAtLeastOneActive(namenodes)) {
throw new IOException("Cannot finalize with no NameNode active");
}
- for (ClientProtocol haNn : namenodes) {
- haNn.finalizeUpgrade();
+
+ List<ProxyAndInfo<ClientProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+ nsId, ClientProtocol.class);
+ for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+ proxy.getProxy().finalizeUpgrade();
+ System.out.println("Finalize upgrade successful for " +
+ proxy.getAddress());
}
} else {
dfs.finalizeUpgrade();
+ System.out.println("Finalize upgrade successful");
}
return 0;
@@ -958,9 +1067,25 @@ public class DFSAdmin extends FsShell {
public int metaSave(String[] argv, int idx) throws IOException {
String pathname = argv[idx];
DistributedFileSystem dfs = getDFS();
- dfs.metaSave(pathname);
- System.out.println("Created metasave file " + pathname + " in the log " +
- "directory of namenode " + dfs.getUri());
+ Configuration dfsConf = dfs.getConf();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+ if (isHaEnabled) {
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<ClientProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+ nsId, ClientProtocol.class);
+ for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+ proxy.getProxy().metaSave(pathname);
+ System.out.println("Created metasave file " + pathname + " in the log "
+ + "directory of namenode " + proxy.getAddress());
+ }
+ } else {
+ dfs.metaSave(pathname);
+ System.out.println("Created metasave file " + pathname + " in the log " +
+ "directory of namenode " + dfs.getUri());
+ }
return 0;
}
@@ -1022,20 +1147,37 @@ public class DFSAdmin extends FsShell {
public int refreshServiceAcl() throws IOException {
// Get the current configuration
Configuration conf = getConf();
-
+
// for security authorization
// server principal for this call
// should be NN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
- // Create the client
- RefreshAuthorizationPolicyProtocol refreshProtocol =
- NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
- RefreshAuthorizationPolicyProtocol.class).getProxy();
-
- // Refresh the authorization policy in-effect
- refreshProtocol.refreshServiceAcl();
+ DistributedFileSystem dfs = getDFS();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+ if (isHaEnabled) {
+ // Run refreshServiceAcl for all NNs if HA is enabled
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+ RefreshAuthorizationPolicyProtocol.class);
+ for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
+ proxy.getProxy().refreshServiceAcl();
+ System.out.println("Refresh service acl successful for "
+ + proxy.getAddress());
+ }
+ } else {
+ // Create the client
+ RefreshAuthorizationPolicyProtocol refreshProtocol =
+ NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+ RefreshAuthorizationPolicyProtocol.class).getProxy();
+ // Refresh the authorization policy in-effect
+ refreshProtocol.refreshServiceAcl();
+ System.out.println("Refresh service acl successful");
+ }
return 0;
}
@@ -1054,14 +1196,32 @@ public class DFSAdmin extends FsShell {
// should be NN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
-
- // Create the client
- RefreshUserMappingsProtocol refreshProtocol =
- NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
- RefreshUserMappingsProtocol.class).getProxy();
- // Refresh the user-to-groups mappings
- refreshProtocol.refreshUserToGroupsMappings();
+ DistributedFileSystem dfs = getDFS();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+ if (isHaEnabled) {
+ // Run refreshUserToGroupsMapings for all NNs if HA is enabled
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+ RefreshUserMappingsProtocol.class);
+ for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+ proxy.getProxy().refreshUserToGroupsMappings();
+ System.out.println("Refresh user to groups mapping successful for "
+ + proxy.getAddress());
+ }
+ } else {
+ // Create the client
+ RefreshUserMappingsProtocol refreshProtocol =
+ NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+ RefreshUserMappingsProtocol.class).getProxy();
+
+ // Refresh the user-to-groups mappings
+ refreshProtocol.refreshUserToGroupsMappings();
+ System.out.println("Refresh user to groups mapping successful");
+ }
return 0;
}
@@ -1082,13 +1242,31 @@ public class DFSAdmin extends FsShell {
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
- // Create the client
- RefreshUserMappingsProtocol refreshProtocol =
- NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
- RefreshUserMappingsProtocol.class).getProxy();
+ DistributedFileSystem dfs = getDFS();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
- // Refresh the user-to-groups mappings
- refreshProtocol.refreshSuperUserGroupsConfiguration();
+ if (isHaEnabled) {
+ // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+ RefreshUserMappingsProtocol.class);
+ for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+ proxy.getProxy().refreshSuperUserGroupsConfiguration();
+ System.out.println("Refresh super user groups configuration " +
+ "successful for " + proxy.getAddress());
+ }
+ } else {
+ // Create the client
+ RefreshUserMappingsProtocol refreshProtocol =
+ NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+ RefreshUserMappingsProtocol.class).getProxy();
+
+ // Refresh the user-to-groups mappings
+ refreshProtocol.refreshSuperUserGroupsConfiguration();
+ System.out.println("Refresh super user groups configuration successful");
+ }
return 0;
}
@@ -1102,15 +1280,33 @@ public class DFSAdmin extends FsShell {
// should be NN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
-
- // Create the client
- RefreshCallQueueProtocol refreshProtocol =
- NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
- RefreshCallQueueProtocol.class).getProxy();
- // Refresh the call queue
- refreshProtocol.refreshCallQueue();
-
+ DistributedFileSystem dfs = getDFS();
+ URI dfsUri = dfs.getUri();
+ boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+ if (isHaEnabled) {
+ // Run refreshCallQueue for all NNs if HA is enabled
+ String nsId = dfsUri.getHost();
+ List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
+ HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+ RefreshCallQueueProtocol.class);
+ for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
+ proxy.getProxy().refreshCallQueue();
+ System.out.println("Refresh call queue successful for "
+ + proxy.getAddress());
+ }
+ } else {
+ // Create the client
+ RefreshCallQueueProtocol refreshProtocol =
+ NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+ RefreshCallQueueProtocol.class).getProxy();
+
+ // Refresh the call queue
+ refreshProtocol.refreshCallQueue();
+ System.out.println("Refresh call queue successful");
+ }
+
return 0;
}
@@ -1244,6 +1440,12 @@ public class DFSAdmin extends FsShell {
} else if ("-fetchImage".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [-fetchImage <local directory>]");
+ } else if ("-shutdownDatanode".equals(cmd)) {
+ System.err.println("Usage: java DFSAdmin"
+ + " [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
+ } else if ("-getDatanodeInfo".equals(cmd)) {
+ System.err.println("Usage: java DFSAdmin"
+ + " [-getDatanodeInfo <datanode_host:ipc_port>]");
} else {
System.err.println("Usage: java DFSAdmin");
System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java Sat Jul 12 02:24:40 2014
@@ -36,8 +36,8 @@ public class SWebHdfsFileSystem extends
}
@Override
- protected synchronized void initializeTokenAspect() {
- tokenAspect = new TokenAspect<SWebHdfsFileSystem>(this, tokenServiceName, TOKEN_KIND);
+ protected Text getTokenKind() {
+ return TOKEN_KIND;
}
@Override
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Sat Jul 12 02:24:40 2014
@@ -69,11 +69,14 @@ import org.apache.hadoop.io.retry.RetryP
import org.apache.hadoop.io.retry.RetryUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;
@@ -98,7 +101,7 @@ public class WebHdfsFileSystem extends F
/** Delegation token kind */
public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
- protected TokenAspect<? extends WebHdfsFileSystem> tokenAspect;
+ private boolean canRefreshDelegationToken;
private UserGroupInformation ugi;
private URI uri;
@@ -127,13 +130,8 @@ public class WebHdfsFileSystem extends F
return "http";
}
- /**
- * Initialize tokenAspect. This function is intended to
- * be overridden by SWebHdfsFileSystem.
- */
- protected synchronized void initializeTokenAspect() {
- tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, tokenServiceName,
- TOKEN_KIND);
+ protected Text getTokenKind() {
+ return TOKEN_KIND;
}
@Override
@@ -162,7 +160,6 @@ public class WebHdfsFileSystem extends F
this.tokenServiceName = isLogicalUri ?
HAUtil.buildTokenServiceForLogicalUri(uri)
: SecurityUtil.buildTokenService(getCanonicalUri());
- initializeTokenAspect();
if (!isHA) {
this.retryPolicy =
@@ -195,10 +192,8 @@ public class WebHdfsFileSystem extends F
}
this.workingDir = getHomeDirectory();
-
- if (UserGroupInformation.isSecurityEnabled()) {
- tokenAspect.initDelegationToken(ugi);
- }
+ this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
+ this.delegationToken = null;
}
@Override
@@ -213,11 +208,46 @@ public class WebHdfsFileSystem extends F
return b;
}
+ TokenSelector<DelegationTokenIdentifier> tokenSelector =
+ new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(getTokenKind()){};
+
+ // the first getAuthParams() for a non-token op will either get the
+ // internal token from the ugi or lazy fetch one
protected synchronized Token<?> getDelegationToken() throws IOException {
- tokenAspect.ensureTokenInitialized();
+ if (canRefreshDelegationToken && delegationToken == null) {
+ Token<?> token = tokenSelector.selectToken(
+ new Text(getCanonicalServiceName()), ugi.getTokens());
+ // ugi tokens are usually indicative of a task which can't
+ // refetch tokens. even if ugi has credentials, don't attempt
+ // to get another token to match hdfs/rpc behavior
+ if (token != null) {
+ LOG.debug("Using UGI token: " + token);
+ canRefreshDelegationToken = false;
+ } else {
+ token = getDelegationToken(null);
+ if (token != null) {
+ LOG.debug("Fetched new token: " + token);
+ } else { // security is disabled
+ canRefreshDelegationToken = false;
+ }
+ }
+ setDelegationToken(token);
+ }
return delegationToken;
}
+ @VisibleForTesting
+ synchronized boolean replaceExpiredDelegationToken() throws IOException {
+ boolean replaced = false;
+ if (canRefreshDelegationToken) {
+ Token<?> token = getDelegationToken(null);
+ LOG.debug("Replaced expired token: " + token);
+ setDelegationToken(token);
+ replaced = (token != null);
+ }
+ return replaced;
+ }
+
@Override
protected int getDefaultPort() {
return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
@@ -288,8 +318,8 @@ public class WebHdfsFileSystem extends F
final int code = conn.getResponseCode();
// server is demanding an authentication we don't support
if (code == HttpURLConnection.HTTP_UNAUTHORIZED) {
- throw new IOException(
- new AuthenticationException(conn.getResponseMessage()));
+ // match hdfs/rpc exception
+ throw new AccessControlException(conn.getResponseMessage());
}
if (code != op.getExpectedHttpResponseCode()) {
final Map<?, ?> m;
@@ -309,7 +339,15 @@ public class WebHdfsFileSystem extends F
return m;
}
- final RemoteException re = JsonUtil.toRemoteException(m);
+ IOException re = JsonUtil.toRemoteException(m);
+ // extract UGI-related exceptions and unwrap InvalidToken
+ // the NN mangles these exceptions but the DN does not and may need
+ // to re-fetch a token if either report the token is expired
+ if (re.getMessage().startsWith("Failed to obtain user group information:")) {
+ String[] parts = re.getMessage().split(":\\s+", 3);
+ re = new RemoteException(parts[1], parts[2]);
+ re = ((RemoteException)re).unwrapRemoteException(InvalidToken.class);
+ }
throw unwrapException? toIOException(re): re;
}
return null;
@@ -344,8 +382,6 @@ public class WebHdfsFileSystem extends F
*/
private synchronized void resetStateToFailOver() {
currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length;
- delegationToken = null;
- tokenAspect.reset();
}
/**
@@ -371,7 +407,7 @@ public class WebHdfsFileSystem extends F
// Skip adding delegation token for token operations because these
// operations require authentication.
Token<?> token = null;
- if (UserGroupInformation.isSecurityEnabled() && !op.getRequireAuth()) {
+ if (!op.getRequireAuth()) {
token = getDelegationToken();
}
if (token != null) {
@@ -542,11 +578,17 @@ public class WebHdfsFileSystem extends F
validateResponse(op, conn, false);
}
return getResponse(conn);
- } catch (IOException ioe) {
- Throwable cause = ioe.getCause();
- if (cause != null && cause instanceof AuthenticationException) {
- throw ioe; // no retries for auth failures
+ } catch (AccessControlException ace) {
+ // no retries for auth failures
+ throw ace;
+ } catch (InvalidToken it) {
+ // try to replace the expired token with a new one. the attempt
+ // to acquire a new token must be outside this operation's retry
+ // so if it fails after its own retries, this operation fails too.
+ if (op.getRequireAuth() || !replaceExpiredDelegationToken()) {
+ throw it;
}
+ } catch (IOException ioe) {
shouldRetry(ioe, retry);
}
}
@@ -714,6 +756,17 @@ public class WebHdfsFileSystem extends F
};
}
}
+
+ class FsPathConnectionRunner extends AbstractFsPathRunner<HttpURLConnection> {
+ FsPathConnectionRunner(Op op, Path fspath, Param<?,?>... parameters) {
+ super(op, fspath, parameters);
+ }
+ @Override
+ HttpURLConnection getResponse(final HttpURLConnection conn)
+ throws IOException {
+ return conn;
+ }
+ }
/**
* Used by open() which tracks the resolved url itself
@@ -1079,16 +1132,41 @@ public class WebHdfsFileSystem extends F
) throws IOException {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.OPEN;
- final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
+ // use a runner so the open can recover from an invalid token
+ FsPathConnectionRunner runner =
+ new FsPathConnectionRunner(op, f, new BufferSizeParam(buffersize));
return new FSDataInputStream(new OffsetUrlInputStream(
- new OffsetUrlOpener(url), new OffsetUrlOpener(null)));
+ new UnresolvedUrlOpener(runner), new OffsetUrlOpener(null)));
}
@Override
- public void close() throws IOException {
- super.close();
- synchronized (this) {
- tokenAspect.removeRenewAction();
+ public synchronized void close() throws IOException {
+ try {
+ if (canRefreshDelegationToken && delegationToken != null) {
+ cancelDelegationToken(delegationToken);
+ }
+ } catch (IOException ioe) {
+ LOG.debug("Token cancel failed: "+ioe);
+ } finally {
+ super.close();
+ }
+ }
+
+ // use FsPathConnectionRunner to ensure retries for InvalidTokens
+ class UnresolvedUrlOpener extends ByteRangeInputStream.URLOpener {
+ private final FsPathConnectionRunner runner;
+ UnresolvedUrlOpener(FsPathConnectionRunner runner) {
+ super(null);
+ this.runner = runner;
+ }
+
+ @Override
+ protected HttpURLConnection connect(long offset, boolean resolved)
+ throws IOException {
+ assert offset == 0;
+ HttpURLConnection conn = runner.run();
+ setURL(conn.getURL());
+ return conn;
}
}
@@ -1141,7 +1219,7 @@ public class WebHdfsFileSystem extends F
}
static class OffsetUrlInputStream extends ByteRangeInputStream {
- OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r)
+ OffsetUrlInputStream(UnresolvedUrlOpener o, OffsetUrlOpener r)
throws IOException {
super(o, r);
}
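The WebHdfsFileSystem changes retire the TokenAspect plumbing in favor of lazy, self-healing token handling: the first authenticated request either selects a matching token from the UGI or fetches a fresh one, and a later InvalidToken failure gets exactly one replacement attempt before the operation is allowed to fail. Condensed from the runner's catch clauses above (runOnce() is a hypothetical stand-in for a single HTTP attempt):

    // Condensed control flow of the retry loop in the diff above.
    while (true) {
      try {
        return runOnce();                       // one HTTP attempt (hypothetical)
      } catch (AccessControlException ace) {
        throw ace;                              // auth failures never retry
      } catch (InvalidToken it) {
        // one shot at re-fetching a token; token ops must not recurse
        if (op.getRequireAuth() || !replaceExpiredDelegationToken()) {
          throw it;
        }
        // otherwise fall through and retry with the replaced token
      } catch (IOException ioe) {
        shouldRetry(ioe, retry);                // normal retry-policy path
      }
    }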
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java Sat Jul 12 02:24:40 2014
@@ -31,8 +31,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import com.google.common.annotations.VisibleForTesting;
import com.sun.jersey.api.ParamException;
import com.sun.jersey.api.container.ContainerException;
@@ -42,9 +45,22 @@ public class ExceptionHandler implements
public static final Log LOG = LogFactory.getLog(ExceptionHandler.class);
private static Exception toCause(Exception e) {
- final Throwable t = e.getCause();
- if (t != null && t instanceof Exception) {
- e = (Exception)e.getCause();
+ final Throwable t = e.getCause();
+ if (e instanceof SecurityException) {
+ // For the issue reported in HDFS-6475, if SecurityException's cause
+ // is InvalidToken, and the InvalidToken's cause is StandbyException,
+ // return StandbyException; Otherwise, leave the exception as is,
+ // since they are handled elsewhere. See HDFS-6588.
+ if (t != null && t instanceof InvalidToken) {
+ final Throwable t1 = t.getCause();
+ if (t1 != null && t1 instanceof StandbyException) {
+ e = (StandbyException)t1;
+ }
+ }
+ } else {
+ if (t != null && t instanceof Exception) {
+ e = (Exception)t;
+ }
}
return e;
}
@@ -74,6 +90,10 @@ public class ExceptionHandler implements
e = ((RemoteException)e).unwrapRemoteException();
}
+ if (e instanceof SecurityException) {
+ e = toCause(e);
+ }
+
//Map response status
final Response.Status s;
if (e instanceof SecurityException) {
@@ -96,4 +116,9 @@ public class ExceptionHandler implements
final String js = JsonUtil.toJsonString(e);
return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
+
+ @VisibleForTesting
+ public void initResponse(HttpServletResponse response) {
+ this.response = response;
+ }
}
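toCause() now special-cases one exact chain: a SecurityException caused by an InvalidToken that was itself caused by a StandbyException collapses to the StandbyException (per HDFS-6475/HDFS-6588), so a request hitting a standby NameNode maps to the right HTTP status instead of a generic security failure. The unwrapping, restated:

    // Only this exact nesting is rewritten; any other SecurityException
    // is left as-is and handled by the status mapping below.
    if (e instanceof SecurityException
        && e.getCause() instanceof InvalidToken
        && e.getCause().getCause() instanceof StandbyException) {
      e = (StandbyException) e.getCause().getCause();
    }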
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Sat Jul 12 02:24:40 2014
@@ -275,6 +275,7 @@ message SnapshottableDirectoryListingPro
message SnapshotDiffReportEntryProto {
required bytes fullpath = 1;
required string modificationLabel = 2;
+ optional bytes targetPath = 3;
}
/**
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto Sat Jul 12 02:24:40 2014
@@ -35,8 +35,8 @@ message XAttrProto {
}
message XAttrEditLogProto {
- required string src = 1;
- optional XAttrProto xAttr = 2;
+ optional string src = 1;
+ repeated XAttrProto xAttrs = 2;
}
enum XAttrSetFlagProto {
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier Sat Jul 12 02:24:40 2014
@@ -13,3 +13,5 @@
#
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$WebHdfsDelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$SWebHdfsDelegationTokenIdentifier
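The two added lines register the WebHDFS- and SWebHDFS-specific token identifiers with Java's ServiceLoader; Hadoop's Token class uses this registry to find a TokenIdentifier implementation whose kind matches a token being decoded. A sketch of the standard lookup mechanics, hedged since Token's internals are not shown here:

    // How a META-INF/services registration is consumed (java.util.ServiceLoader):
    ServiceLoader<TokenIdentifier> ids =
        ServiceLoader.load(TokenIdentifier.class);
    for (TokenIdentifier id : ids) {
      // each listed class is instantiated reflectively; a decoder is
      // chosen by matching id.getKind() against the token's kind Text
    }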
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Sat Jul 12 02:24:40 2014
@@ -1996,4 +1996,16 @@
</description>
</property>
+<property>
+ <name>dfs.namenode.startup.delay.block.deletion.sec</name>
+ <value>0</value>
+ <description>The delay in seconds at which we will pause the blocks deletion
+ after Namenode startup. By default it's disabled.
+ In the case a directory has large number of directories and files are
+ deleted, suggested delay is one hour to give the administrator enough time
+ to notice large number of pending deletion blocks and take corrective
+ action.
+ </description>
+</property>
+
</configuration>
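The new property holds off block deletions for a configurable number of seconds after NameNode startup; 0 (the default) disables the delay. A hedged sketch of how the value would be consumed, with the key string taken from the property above:

    // Read the new key; the description above suggests 3600 (one hour)
    // for clusters where mass deletions need administrator review.
    long delaySec = conf.getLong(
        "dfs.namenode.startup.delay.block.deletion.sec", 0);
    long delayMs = delaySec * 1000L;   // 0 means deletion starts immediately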
Propchange: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1603348-1609877
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js Sat Jul 12 02:24:40 2014
@@ -58,7 +58,7 @@
var msg = '<p>Path does not exist on HDFS or WebHDFS is disabled. Please check your path or enable WebHDFS</p>';
break;
default:
- var msg = '<p>Failed to retreive data from ' + url + ': ' + err + '</p>';
+ var msg = '<p>Failed to retrieve data from ' + url + ': ' + err + '</p>';
}
show_err_msg(msg);
};
@@ -103,7 +103,7 @@
}
var url = '/webhdfs/v1' + abs_path + '?op=GET_BLOCK_LOCATIONS';
- $.ajax({"url": url, "crossDomain": true}).done(function(data) {
+ $.get(url).done(function(data) {
var d = get_response(data, "LocatedBlocks");
if (d === null) {
show_err_msg(get_response_err_msg(data));
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm Sat Jul 12 02:24:40 2014
@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
** {Native Libraries}
In order to lock block files into memory, the DataNode relies on native JNI
- code found in <<<libhadoop.so>>>. Be sure to
+ code found in <<<libhadoop.so>>>, or <<<hadoop.dll>>> on Windows. Be sure to
{{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
centralized cache management.
@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
* dfs.datanode.max.locked.memory
This determines the maximum amount of memory a DataNode will use for caching.
- The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
- also needs to be increased to match this parameter (see below section on
- {{OS Limits}}). When setting this value, please remember that you will need
- space in memory for other things as well, such as the DataNode and
- application JVM heaps and the operating system page cache.
+ On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
+ the DataNode user also needs to be increased to match this parameter (see
+ below section on {{OS Limits}}). When setting this value, please remember
+ that you will need space in memory for other things as well, such as the
+ DataNode and application JVM heaps and the operating system page cache.
*** Optional
@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
"unlimited," indicating that there is no limit. Note that it's typical for
<<<ulimit -l>>> to output the memory lock limit in KB, but
dfs.datanode.max.locked.memory must be specified in bytes.
+
+ This information does not apply to deployments on Windows. Windows has no
+ direct equivalent of <<<ulimit -l>>>.
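Since <<<ulimit -l>>> typically reports the limit in KB while dfs.datanode.max.locked.memory takes bytes, a small sketch of the conversion (the 64 MB figure is illustrative only):

  import org.apache.hadoop.conf.Configuration;

  // Match the caching limit to a 64 MB memlock ulimit:
  // `ulimit -l` reports KB, the property is specified in bytes.
  long ulimitKb = 65536;
  Configuration conf = new Configuration();
  conf.setLong("dfs.datanode.max.locked.memory", ulimitKb * 1024L);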
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm Sat Jul 12 02:24:40 2014
@@ -287,13 +287,14 @@ HDFS Federation
Policy could be:
- * <<<node>>> - this is the <default> policy. This balances the storage at
+ * <<<datanode>>> - this is the <default> policy. This balances the storage at
the datanode level. This is similar to balancing policy from prior releases.
* <<<blockpool>>> - this balances the storage at the block pool level.
Balancing at block pool level balances storage at the datanode level also.
- Note that Balander only balances the data and does not balance the namespace.
+ Note that Balancer only balances the data and does not balance the namespace.
+ For the complete command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
** Decommissioning
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm Sat Jul 12 02:24:40 2014
@@ -838,41 +838,22 @@ digest:hdfs-zkfcs:vlUvLnd8MlacsE80rDuu6O
$ mvn clean package -Pdist
- This will generate a jar with the BookKeeperJournalManager, all the dependencies
- needed by the journal manager,
+ This will generate a jar with the BookKeeperJournalManager,
hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-<VERSION>.jar
- Note that the -Pdist part of the build command is important, as otherwise
- the dependencies would not be packaged in the jar. The dependencies included in
- the jar are {{{http://maven.apache.org/plugins/maven-shade-plugin/}shaded}} to
- avoid conflicts with other dependencies of the NameNode.
+ Note that the -Pdist part of the build command is important, as it copies
+ the dependent bookkeeper-server jar under
+ hadoop-hdfs/src/contrib/bkjournal/target/lib.
*** <<Putting the BookKeeperJournalManager in the NameNode classpath>>
- To run a HDFS namenode using BookKeeper as a backend, copy the bkjournal
- jar, generated above, into the lib directory of hdfs. In the standard
- distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
+ To run an HDFS namenode using BookKeeper as a backend, copy the bkjournal and
+ bookkeeper-server jars mentioned above into the lib directory of hdfs. In the
+ standard distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
cp hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-<VERSION>.jar $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
*** <<Current limitations>>
- 1) NameNode format command will not format the BookKeeper data automatically.
- We have to clean the data manually from BookKeeper cluster
- and create the /ledgers/available path in Zookeeper.
-----
-$ zkCli.sh create /ledgers 0
-$ zkCli.sh create /ledgers/available 0
-----
- Note:
- bookkeeper://zk1:2181;zk2:2181;zk3:2181/hdfsjournal
- The final part /hdfsjournal specifies the znode in zookeeper where
- ledger metadata will be stored. Administrators may set this to anything
- they wish.
-
- 2) Security in BookKeeper. BookKeeper does not support SASL nor SSL for
- connections between the NameNode and BookKeeper storage nodes.
-
- 3) Auto-Recovery of storage node failures. Work inprogress
- {{{https://issues.apache.org/jira/browse/BOOKKEEPER-237 }BOOKKEEPER-237}}.
- Currently we have the tools to manually recover the data from failed storage nodes.
+ 1) Security in BookKeeper. BookKeeper does not support SASL or SSL for
+ connections between the NameNode and BookKeeper storage nodes.
\ No newline at end of file
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Sat Jul 12 02:24:40 2014
@@ -88,6 +88,25 @@ HDFS NFS Gateway
</property>
----
+ The AIX NFS client has a {{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}}
+ that prevent it from working correctly by default with the HDFS NFS
+ Gateway. If you want to be able to access the HDFS NFS Gateway from AIX, you
+ should set the following configuration property to enable work-arounds for these
+ issues:
+
+----
+<property>
+ <name>nfs.aix.compatibility.mode.enabled</name>
+ <value>true</value>
+</property>
+----
+
+ Note that regular, non-AIX clients should NOT enable AIX compatibility mode.
+ The work-arounds implemented by AIX compatibility mode effectively disable
+ safeguards that ensure that directory listings via NFS return consistent
+ results, and that all data sent to the NFS server has actually been
+ committed.
+
It's strongly recommended that users update a few configuration properties based on their use
cases. All the related configuration properties can be added or updated in hdfs-site.xml.
@@ -322,6 +341,22 @@ HDFS NFS Gateway
Then the users can access HDFS as part of the local file system, except that
hard links and random writes are not supported yet.
+* {Allow mounts from unprivileged clients}
+
+ In environments where root access on client machines is not generally
+ available, some measure of security can be obtained by ensuring that only NFS
+ clients originating from privileged ports can connect to the NFS server. This
+ feature, referred to as "port monitoring," is not enabled by default in the
+ HDFS NFS Gateway, but can be enabled by setting the following config in
+ hdfs-site.xml on the NFS Gateway machine:
+
+-------------------------------------------------------------------
+<property>
+ <name>nfs.port.monitoring.disabled</name>
+ <value>false</value>
+</property>
+-------------------------------------------------------------------
+
* {User authentication and mapping}
NFS gateway in this release uses AUTH_UNIX style authentication. When the user on NFS client
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Sat Jul 12 02:24:40 2014
@@ -77,7 +77,7 @@ HDFS Users Guide
* <<<fetchdt>>>: a utility to fetch DelegationToken and store it in a
file on the local system.
- * Rebalancer: tool to balance the cluster when the data is
+ * Balancer: tool to balance the cluster when the data is
unevenly distributed among DataNodes.
* Upgrade and rollback: after a software upgrade, it is possible
@@ -316,7 +316,7 @@ HDFS Users Guide
For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
-* Rebalancer
+* Balancer
HDFS data might not always be placed uniformly across the DataNodes.
One common reason is addition of new DataNodes to an existing cluster.
@@ -338,7 +338,7 @@ HDFS Users Guide
Due to multiple competing considerations, data might not be uniformly
placed across the DataNodes. HDFS provides a tool for administrators
that analyzes block placement and rebalances data across the DataNodes.
- A brief administrator's guide for rebalancer as a PDF is attached to
+ A brief administrator's guide for balancer is available at
{{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Sat Jul 12 02:24:40 2014
@@ -70,6 +70,18 @@ WebHDFS REST API
* {{{Get Delegation Tokens}<<<GETDELEGATIONTOKENS>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getDelegationTokens)
+ * {{{Get an XAttr}<<<GETXATTRS>>>}}
+ (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttr)
+
+ * {{{Get multiple XAttrs}<<<GETXATTRS>>>}}
+ (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs)
+
+ * {{{Get all XAttrs}<<<GETXATTRS>>>}}
+ (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs)
+
+ * {{{List all XAttrs}<<<LISTXATTRS>>>}}
+ (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
+
* HTTP PUT
* {{{Create and Write to a File}<<<CREATE>>>}}
@@ -108,6 +120,12 @@ WebHDFS REST API
* {{{Rename Snapshot}<<<RENAMESNAPSHOT>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.renameSnapshot)
+ * {{{Set XAttr}<<<SETXATTR>>>}}
+ (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setXAttr)
+
+ * {{{Remove XAttr}<<<REMOVEXATTR>>>}}
+ (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeXAttr)
+
* HTTP POST
* {{{Append to a File}<<<APPEND>>>}}
@@ -376,7 +394,7 @@ Hello, webhdfs user!
* Submit a HTTP PUT request.
+---------------------------------
-curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
+---------------------------------
The client receives a response with a {{{Boolean JSON Schema}<<<boolean>>> JSON object}}:
@@ -401,7 +419,7 @@ Transfer-Encoding: chunked
* Submit a HTTP PUT request.
+---------------------------------
-curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=CREATESYMLINK
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATESYMLINK
&destination=<PATH>[&createParent=<true|false>]"
+---------------------------------
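A hedged Java equivalent of the corrected MKDIRS call, using plain HttpURLConnection; the host, port and path are placeholders, and an insecure cluster may also need a user.name query parameter:

  import java.io.IOException;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class MkdirsExample {
    public static void main(String[] args) throws IOException {
      // Note the /webhdfs/v1 prefix that the documentation fix restores.
      URL url = new URL(
          "http://namenode:50070/webhdfs/v1/tmp/newdir?op=MKDIRS&permission=755");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("PUT");
      System.out.println(conn.getResponseCode()); // 200 with {"boolean": true}
      conn.disconnect();
    }
  }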
@@ -909,6 +927,188 @@ Transfer-Encoding: chunked
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
+* {Extended Attributes (XAttrs) Operations}
+
+** {Set XAttr}
+
+ * Submit a HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETXATTR
+ &xattr.name=<XATTRNAME>&xattr.value=<XATTRVALUE>
+ &flag=<FLAG>"
++---------------------------------
+
+ The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+ []
+
+ See also:
+ {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setXAttr
+
+
+** {Remove XAttr}
+
+ * Submit a HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEXATTR
+ &xattr.name=<XATTRNAME>"
++---------------------------------
+
+ The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+ []
+
+ See also:
+ {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeXAttr
+
+
+** {Get an XAttr}
+
+ * Submit a HTTP GET request.
+
++---------------------------------
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+ &xattr.name=<XATTRNAME>&encoding=<ENCODING>"
++---------------------------------
+
+ The client receives a response with a {{{XAttrs JSON Schema}<<<XAttrs>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+ "XAttrs": [
+ {
+ "name":"XATTRNAME",
+ "value":"XATTRVALUE"
+ }
+ ]
+}
++---------------------------------
+
+ []
+
+ See also:
+ {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttr
+
+
+** {Get multiple XAttrs}
+
+ * Submit a HTTP GET request.
+
++---------------------------------
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+ &xattr.name=<XATTRNAME1>&xattr.name=<XATTRNAME2>
+ &encoding=<ENCODING>"
++---------------------------------
+
+ The client receives a response with a {{{XAttrs JSON Schema}<<<XAttrs>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+ "XAttrs": [
+ {
+ "name":"XATTRNAME1",
+ "value":"XATTRVALUE1"
+ },
+ {
+ "name":"XATTRNAME2",
+ "value":"XATTRVALUE2"
+ }
+ ]
+}
++---------------------------------
+
+ []
+
+ See also:
+ {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs
+
+
+** {Get all XAttrs}
+
+ * Submit a HTTP GET request.
+
++---------------------------------
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+ &encoding=<ENCODING>"
++---------------------------------
+
+ The client receives a response with a {{{XAttrs JSON Schema}<<<XAttrs>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+ "XAttrs": [
+ {
+ "name":"XATTRNAME1",
+ "value":"XATTRVALUE1"
+ },
+ {
+ "name":"XATTRNAME2",
+ "value":"XATTRVALUE2"
+ },
+ {
+ "name":"XATTRNAME3",
+ "value":"XATTRVALUE3"
+ }
+ ]
+}
++---------------------------------
+
+ []
+
+ See also:
+ {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs
+
+
+** {List all XAttrs}
+
+ * Submit a HTTP GET request.
+
++---------------------------------
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTXATTRS"
++---------------------------------
+
+ The client receives a response with a {{{XAttrNames JSON Schema}<<<XAttrNames>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+ "XAttrNames":"[\"XATTRNAME1\",\"XATTRNAME2\",\"XATTRNAME3\"]"
+}
++---------------------------------
+
+ []
+
+ See also:
+ {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs
+
+
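For comparison, a hedged sketch of the same operations through the FileSystem API that the "See also" links above point to (the path and attribute names are hypothetical):

  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // Set, get, list and remove an xattr on a single path.
  FileSystem fs = FileSystem.get(new Configuration());
  Path p = new Path("/user/alice/file");
  fs.setXAttr(p, "user.mykey", "myvalue".getBytes(StandardCharsets.UTF_8));
  byte[] value = fs.getXAttr(p, "user.mykey");
  System.out.println(fs.listXAttrs(p)); // all xattr names
  fs.removeXAttr(p, "user.mykey");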
* {Snapshot Operations}
** {Create Snapshot}
@@ -1252,6 +1452,58 @@ Transfer-Encoding: chunked
+---------------------------------
+** {XAttrs JSON Schema}
+
++---------------------------------
+{
+ "name" : "XAttrs",
+ "properties":
+ {
+ "XAttrs":
+ {
+ "type" : "array",
+ "items":
+ {
+ "type" " "object",
+ "properties":
+ {
+ "name":
+ {
+ "description": "XAttr name.",
+ "type" : "string",
+ "required" : true
+ },
+ "value":
+ {
+ "description": "XAttr value.",
+ "type" : "string"
+ }
+ }
+ }
+ }
+ }
+}
++---------------------------------
+
+
+** {XAttrNames JSON Schema}
+
++---------------------------------
+{
+ "name" : "XAttrNames",
+ "properties":
+ {
+ "XAttrNames":
+ {
+ "description": "XAttr names.",
+ "type" : "string"
+ "required" : true
+ }
+ }
+}
++---------------------------------
+
+
** {Boolean JSON Schema}
+---------------------------------
@@ -1688,6 +1940,83 @@ var tokenProperties =
*----------------+-------------------------------------------------------------------+
+** {XAttr Name}
+
+*----------------+-------------------------------------------------------------------+
+|| Name | <<<xattr.name>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description | The XAttr name of a file/directory. |
+*----------------+-------------------------------------------------------------------+
+|| Type | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values | Any string prefixed with user./trusted./system./security.. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax | Any string prefixed with user./trusted./system./security.. |
+*----------------+-------------------------------------------------------------------+
+
+
+** {XAttr Value}
+
+*----------------+-------------------------------------------------------------------+
+|| Name | <<<xattr.value>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description | The XAttr value of a file/directory. |
+*----------------+-------------------------------------------------------------------+
+|| Type | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values | An encoded value. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax | Enclosed in double quotes or prefixed with 0x or 0s. |
+*----------------+-------------------------------------------------------------------+
+
+ See also:
+ {{{./ExtendedAttributes.html}Extended Attributes}}
+
+
+** {XAttr set flag}
+
+*----------------+-------------------------------------------------------------------+
+|| Name | <<<flag>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description | The XAttr set flag. |
+*----------------+-------------------------------------------------------------------+
+|| Type | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values | CREATE,REPLACE. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax | CREATE,REPLACE. |
+*----------------+-------------------------------------------------------------------+
+
+ See also:
+ {{{./ExtendedAttributes.html}Extended Attributes}}
+
+
+** {XAttr value encoding}
+
+*----------------+-------------------------------------------------------------------+
+|| Name | <<<encoding>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description | The XAttr value encoding. |
+*----------------+-------------------------------------------------------------------+
+|| Type | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values | text \| hex \| base64 |
+*----------------+-------------------------------------------------------------------+
+|| Syntax | text \| hex \| base64 |
+*----------------+-------------------------------------------------------------------+
+
+ See also:
+ {{{./ExtendedAttributes.html}Extended Attributes}}
+
+
** {Access Time}
*----------------+-------------------------------------------------------------------+
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml?rev=1609878&r1=1609877&r2=1609878&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml Sat Jul 12 02:24:40 2014
@@ -217,7 +217,7 @@
<subsection name="DFSAdmin Commands" id="dfsadminCommands">
<h4><code>dfsadmin -rollingUpgrade</code></h4>
- <source>hdfs dfsadmin -rollingUpgrade <query|start|finalize></source>
+ <source>hdfs dfsadmin -rollingUpgrade <query|prepare|finalize></source>
<p>
Execute a rolling upgrade action.
<ul><li>Options:<table>