Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/06/30 21:01:14 UTC
[01/27] hadoop git commit: HADOOP-11807. add a lint mode to
releasedocmaker (ramtin via aw)
Repository: hadoop
Updated Branches:
refs/heads/HADOOP-12111 084becdd7 -> 1c5ae8a84
HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79ed0f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79ed0f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79ed0f95
Branch: refs/heads/HADOOP-12111
Commit: 79ed0f959ffc490414ca56a73e026500c24e7078
Parents: fe6c1bd
Author: Allen Wittenauer <aw...@apache.org>
Authored: Sat Jun 27 08:59:50 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Sat Jun 27 08:59:50 2015 -0700
----------------------------------------------------------------------
dev-support/releasedocmaker.py | 76 +++++++++++++++++---
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
2 files changed, 68 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79ed0f95/dev-support/releasedocmaker.py
----------------------------------------------------------------------
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index 2ccc1c0..8e68b3c 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -87,8 +87,15 @@ def notableclean(str):
str=str.rstrip()
return str
+# clean output dir
+def cleanOutputDir(dir):
+ files = os.listdir(dir)
+ for name in files:
+ os.remove(os.path.join(dir,name))
+ os.rmdir(dir)
+
def mstr(obj):
- if (obj == None):
+ if (obj is None):
return ""
return unicode(obj)
@@ -148,7 +155,7 @@ class Jira:
return mstr(self.fields['description'])
def getReleaseNote(self):
- if (self.notes == None):
+ if (self.notes is None):
field = self.parent.fieldIdMap['Release Note']
if (self.fields.has_key(field)):
self.notes=mstr(self.fields[field])
@@ -159,14 +166,14 @@ class Jira:
def getPriority(self):
ret = ""
pri = self.fields['priority']
- if(pri != None):
+ if(pri is not None):
ret = pri['name']
return mstr(ret)
def getAssignee(self):
ret = ""
mid = self.fields['assignee']
- if(mid != None):
+ if(mid is not None):
ret = mid['displayName']
return mstr(ret)
@@ -182,21 +189,21 @@ class Jira:
def getType(self):
ret = ""
mid = self.fields['issuetype']
- if(mid != None):
+ if(mid is not None):
ret = mid['name']
return mstr(ret)
def getReporter(self):
ret = ""
mid = self.fields['reporter']
- if(mid != None):
+ if(mid is not None):
ret = mid['displayName']
return mstr(ret)
def getProject(self):
ret = ""
mid = self.fields['project']
- if(mid != None):
+ if(mid is not None):
ret = mid['key']
return mstr(ret)
@@ -214,7 +221,7 @@ class Jira:
return False
def getIncompatibleChange(self):
- if (self.incompat == None):
+ if (self.incompat is None):
field = self.parent.fieldIdMap['Hadoop Flags']
self.reviewed=False
self.incompat=False
@@ -227,6 +234,24 @@ class Jira:
self.reviewed=True
return self.incompat
+ def checkMissingComponent(self):
+ if (len(self.fields['components'])>0):
+ return False
+ return True
+
+ def checkMissingAssignee(self):
+ if (self.fields['assignee'] is not None):
+ return False
+ return True
+
+ def checkVersionString(self):
+ field = self.parent.fieldIdMap['Fix Version/s']
+ for h in self.fields[field]:
+ found = re.match('^((\d+)(\.\d+)*).*$|^(\w+\-\d+)$', h['name'])
+ if not found:
+ return True
+ return False
+
def getReleaseDate(self,version):
for j in range(len(self.fields['fixVersions'])):
if self.fields['fixVersions'][j]==version:
@@ -339,9 +364,11 @@ def main():
help="build an index file")
parser.add_option("-u","--usetoday", dest="usetoday", action="store_true",
help="use current date for unreleased versions")
+ parser.add_option("-n","--lint", dest="lint", action="store_true",
+ help="use lint flag to exit on failures")
(options, args) = parser.parse_args()
- if (options.versions == None):
+ if (options.versions is None):
options.versions = []
if (len(args) > 2):
@@ -396,6 +423,9 @@ def main():
reloutputs.writeAll(relhead)
choutputs.writeAll(chhead)
+ errorCount=0
+ warningCount=0
+ lintMessage=""
incompatlist=[]
buglist=[]
improvementlist=[]
@@ -408,6 +438,14 @@ def main():
for jira in sorted(jlist):
if jira.getIncompatibleChange():
incompatlist.append(jira)
+ if (len(jira.getReleaseNote())==0):
+ warningCount+=1
+
+ if jira.checkVersionString():
+ warningCount+=1
+
+ if jira.checkMissingComponent() or jira.checkMissingAssignee():
+ errorCount+=1
elif jira.getType() == "Bug":
buglist.append(jira)
elif jira.getType() == "Improvement":
@@ -431,15 +469,33 @@ def main():
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
reloutputs.writeKeyRaw(jira.getProject(), line)
line ='\n**WARNING: No release note provided for this incompatible change.**\n\n'
- print 'WARNING: incompatible change %s lacks release notes.' % (notableclean(jira.getId()))
+ lintMessage += "\nWARNING: incompatible change %s lacks release notes." % (notableclean(jira.getId()))
reloutputs.writeKeyRaw(jira.getProject(), line)
+ if jira.checkVersionString():
+ lintMessage += "\nWARNING: Version string problem for %s " % jira.getId()
+
+ if (jira.checkMissingComponent() or jira.checkMissingAssignee()):
+ errorMessage=[]
+ jira.checkMissingComponent() and errorMessage.append("component")
+ jira.checkMissingAssignee() and errorMessage.append("assignee")
+ lintMessage += "\nERROR: missing %s for %s " % (" and ".join(errorMessage) , jira.getId())
+
if (len(jira.getReleaseNote())>0):
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
reloutputs.writeKeyRaw(jira.getProject(), line)
line ='\n%s\n\n' % (tableclean(jira.getReleaseNote()))
reloutputs.writeKeyRaw(jira.getProject(), line)
+ if (options.lint is True):
+ print lintMessage
+ print "======================================="
+ print "Error:%d, Warning:%d \n" % (errorCount, warningCount)
+
+ if (errorCount>0):
+ cleanOutputDir(version)
+ sys.exit(1)
+
reloutputs.writeAll("\n\n")
reloutputs.close()
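
For reference, the fix-version pattern that the new checkVersionString() lint check enforces can be exercised on its own. Below is a minimal, hypothetical Java re-expression of the same regex (the pattern string is copied from the diff above; the class name and sample inputs are illustrative only):

import java.util.regex.Pattern;

public class VersionStringCheckDemo {
  // Same pattern as checkVersionString() above: either a dotted numeric
  // version ("2.8.0", "3.0.0-alpha1") or a JIRA-style branch ("HDFS-7285").
  private static final Pattern FIX_VERSION =
      Pattern.compile("^((\\d+)(\\.\\d+)*).*$|^(\\w+\\-\\d+)$");

  public static void main(String[] args) {
    String[] samples = {"2.8.0", "3.0.0-alpha1", "HDFS-7285", "fix-me"};
    for (String v : samples) {
      // "fix-me" matches neither alternative, so lint mode would warn on it
      System.out.println(v + " -> " + FIX_VERSION.matcher(v).matches());
    }
  }
}

Per the diff, a version-string mismatch is reported as a warning under --lint (-n), while a missing component or assignee counts as an error; any error makes the run clean the output directory and exit non-zero.
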
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79ed0f95/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 92e1bfa..18c73c0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -230,6 +230,8 @@ Trunk (Unreleased)
HADOOP-11142. Remove hdfs dfs reference from file system shell
documentation (Kengo Seki via aw)
+ HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw)
+
BUG FIXES
HADOOP-11473. test-patch says "-1 overall" even when all checks are +1
[23/27] hadoop git commit: HADOOP-12154. FileSystem#getUsed() returns
the file length only from root '/' (Contributed by J.Andreina)
Posted by aw...@apache.org.
HADOOP-12154. FileSystem#getUsed() returns the file length only from root '/' (Contributed by J.Andreina)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d99017f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d99017f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d99017f
Branch: refs/heads/HADOOP-12111
Commit: 6d99017f38f5a158b5cb65c74688b4c833e4e35f
Parents: 77588e1
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Jun 30 15:25:20 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Jun 30 15:25:20 2015 +0530
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/fs/FileSystem.java | 6 ++---
.../org/apache/hadoop/hdfs/TestDFSShell.java | 26 ++++++++++++++++++++
3 files changed, 32 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d99017f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 50192ae..2cf9509 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -899,6 +899,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12089. StorageException complaining " no lease ID" when updating
FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
+ HADOOP-12154. FileSystem#getUsed() returns the file length only from root '/'
+ (J.Andreina via vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d99017f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 1d7bc87..c98074a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2085,9 +2085,9 @@ public abstract class FileSystem extends Configured implements Closeable {
/** Return the total size of all files in the filesystem.*/
public long getUsed() throws IOException{
long used = 0;
- FileStatus[] files = listStatus(new Path("/"));
- for(FileStatus file:files){
- used += file.getLen();
+ RemoteIterator<LocatedFileStatus> files = listFiles(new Path("/"), true);
+ while (files.hasNext()) {
+ used += files.next().getLen();
}
return used;
}
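
The behavioral difference is easy to see outside the patch: listStatus(new Path("/")) returns only the root's direct children, and a directory entry reports a length of 0, so files in sub-folders were never counted. A minimal, hypothetical sketch of the recursive traversal the fix switches to (the class name is illustrative, and fs.defaultFS is assumed to point at the target cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class UsedSpaceDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    long used = 0;
    // listFiles(path, true) walks the whole tree, unlike the old
    // listStatus(new Path("/")) which only saw root-level entries
    RemoteIterator<LocatedFileStatus> files =
        fs.listFiles(new Path("/"), true);
    while (files.hasNext()) {
      used += files.next().getLen();
    }
    System.out.println("bytes used: " + used);
  }
}

This matches the new test below, where /File1 and /Folder1/File2 hold two bytes each and getUsed() is expected to return 4.
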
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d99017f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 2df31c4..1386124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -913,6 +913,32 @@ public class TestDFSShell {
cluster.shutdown();
}
}
+
+ @Test(timeout = 30000)
+ public void testTotalSizeOfAllFiles() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ FileSystem fs = cluster.getFileSystem();
+ // create file under root
+ FSDataOutputStream File1 = fs.create(new Path("/File1"));
+ File1.write("hi".getBytes());
+ File1.close();
+ // create file under sub-folder
+ FSDataOutputStream File2 = fs.create(new Path("/Folder1/File2"));
+ File2.write("hi".getBytes());
+ File2.close();
+ // getUsed() should return total length of all the files in Filesystem
+ assertEquals(4, fs.getUsed());
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+ }
+
private static void runCount(String path, long dirs, long files, FsShell shell
) throws IOException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
[18/27] hadoop git commit: Merge branch 'trunk' of
https://git-wip-us.apache.org/repos/asf/hadoop into trunk
Posted by aw...@apache.org.
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34ee0b9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34ee0b9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34ee0b9b
Branch: refs/heads/HADOOP-12111
Commit: 34ee0b9b4797093f5aeb0d55f32cf1b74b02e1c2
Parents: 8e1bdc1 4672315
Author: Ming Ma <mi...@apache.org>
Authored: Mon Jun 29 14:37:44 2015 -0700
Committer: Ming Ma <mi...@apache.org>
Committed: Mon Jun 29 14:37:44 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../api/records/impl/pb/SerializedExceptionPBImpl.java | 2 +-
.../records/impl/pb/TestSerializedExceptionPBImpl.java | 12 ++++++++++--
3 files changed, 14 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
[19/27] hadoop git commit: HDFS-8659. Block scanner INFO message is
spamming logs. Contributed by Yongjun Zhang.
Posted by aw...@apache.org.
HDFS-8659. Block scanner INFO message is spamming logs. Contributed by Yongjun Zhang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43a1288f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43a1288f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43a1288f
Branch: refs/heads/HADOOP-12111
Commit: 43a1288fff5e323895fde66b9e3eb31a7412fcc2
Parents: 34ee0b9
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Mon Jun 29 14:35:39 2015 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Mon Jun 29 14:43:56 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../org/apache/hadoop/hdfs/server/datanode/BlockScanner.java | 2 +-
.../apache/hadoop/hdfs/server/datanode/VolumeScanner.java | 8 ++++----
3 files changed, 7 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43a1288f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0c56f2b..eb006eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -985,6 +985,8 @@ Release 2.7.2 - UNRELEASED
IMPROVEMENTS
+ HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43a1288f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 9c4dd10..be6aa83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -317,7 +317,7 @@ public class BlockScanner {
*/
synchronized void markSuspectBlock(String storageId, ExtendedBlock block) {
if (!isEnabled()) {
- LOG.info("Not scanning suspicious block {} on {}, because the block " +
+ LOG.debug("Not scanning suspicious block {} on {}, because the block " +
"scanner is disabled.", block, storageId);
return;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43a1288f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 615abe9..ff655c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -656,24 +656,24 @@ public class VolumeScanner extends Thread {
public synchronized void markSuspectBlock(ExtendedBlock block) {
if (stopping) {
- LOG.info("{}: Not scheduling suspect block {} for " +
+ LOG.debug("{}: Not scheduling suspect block {} for " +
"rescanning, because this volume scanner is stopping.", this, block);
return;
}
Boolean recent = recentSuspectBlocks.getIfPresent(block);
if (recent != null) {
- LOG.info("{}: Not scheduling suspect block {} for " +
+ LOG.debug("{}: Not scheduling suspect block {} for " +
"rescanning, because we rescanned it recently.", this, block);
return;
}
if (suspectBlocks.contains(block)) {
- LOG.info("{}: suspect block {} is already queued for " +
+ LOG.debug("{}: suspect block {} is already queued for " +
"rescanning.", this, block);
return;
}
suspectBlocks.add(block);
recentSuspectBlocks.put(block, true);
- LOG.info("{}: Scheduling suspect block {} for rescanning.", this, block);
+ LOG.debug("{}: Scheduling suspect block {} for rescanning.", this, block);
notify(); // wake scanner thread.
}
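
A side note on why the demotion is safe and cheap: these calls use SLF4J's parameterized {} style, so the message is only formatted when the target level is enabled and no isDebugEnabled() guard is needed around them. A minimal, hypothetical sketch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogLevelDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogLevelDemo.class);

  public static void main(String[] args) {
    // With DEBUG disabled, the arguments are never concatenated into a
    // string, so demoting a hot-path INFO to debug costs almost nothing.
    LOG.debug("Not scanning suspicious block {} on {}.", "blk_1", "storage-1");
  }
}
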
[17/27] hadoop git commit: HADOOP-12107. long running apps may have a
huge number of StatisticsData instances under FileSystem (Sangjin Lee via
Ming Ma)
Posted by aw...@apache.org.
HADOOP-12107. long running apps may have a huge number of StatisticsData instances under FileSystem (Sangjin Lee via Ming Ma)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e1bdc17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e1bdc17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e1bdc17
Branch: refs/heads/HADOOP-12111
Commit: 8e1bdc17d9134e01115ae7c929503d8ac0325207
Parents: 460e98f
Author: Ming Ma <mi...@apache.org>
Authored: Mon Jun 29 14:37:38 2015 -0700
Committer: Ming Ma <mi...@apache.org>
Committed: Mon Jun 29 14:37:38 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../java/org/apache/hadoop/fs/FileSystem.java | 140 +++++++++++++------
.../apache/hadoop/fs/FCStatisticsBaseTest.java | 56 +++++++-
3 files changed, 155 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e1bdc17/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a9b44e3..50192ae 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -490,6 +490,9 @@ Trunk (Unreleased)
HADOOP-11347. RawLocalFileSystem#mkdir and create should honor umask (Varun
Saxena via Colin P. McCabe)
+ HADOOP-12107. long running apps may have a huge number of StatisticsData
+ instances under FileSystem (Sangjin Lee via Ming Ma)
+
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e1bdc17/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 3f9e3bd..1d7bc87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.fs;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.lang.ref.WeakReference;
+import java.lang.ref.PhantomReference;
+import java.lang.ref.ReferenceQueue;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
@@ -32,7 +33,6 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
@@ -2920,16 +2920,6 @@ public abstract class FileSystem extends Configured implements Closeable {
volatile int readOps;
volatile int largeReadOps;
volatile int writeOps;
- /**
- * Stores a weak reference to the thread owning this StatisticsData.
- * This allows us to remove StatisticsData objects that pertain to
- * threads that no longer exist.
- */
- final WeakReference<Thread> owner;
-
- StatisticsData(WeakReference<Thread> owner) {
- this.owner = owner;
- }
/**
* Add another StatisticsData object to this one.
@@ -3000,17 +2990,37 @@ public abstract class FileSystem extends Configured implements Closeable {
* Thread-local data.
*/
private final ThreadLocal<StatisticsData> threadData;
-
+
/**
- * List of all thread-local data areas. Protected by the Statistics lock.
+ * Set of all thread-local data areas. Protected by the Statistics lock.
+ * The references to the statistics data are kept using phantom references
+ * to the associated threads. Proper clean-up is performed by the cleaner
+ * thread when the threads are garbage collected.
*/
- private LinkedList<StatisticsData> allData;
+ private final Set<StatisticsDataReference> allData;
+
+ /**
+ * Global reference queue and a cleaner thread that manage statistics data
+ * references from all filesystem instances.
+ */
+ private static final ReferenceQueue<Thread> STATS_DATA_REF_QUEUE;
+ private static final Thread STATS_DATA_CLEANER;
+
+ static {
+ STATS_DATA_REF_QUEUE = new ReferenceQueue<Thread>();
+ // start a single daemon cleaner thread
+ STATS_DATA_CLEANER = new Thread(new StatisticsDataReferenceCleaner());
+ STATS_DATA_CLEANER.
+ setName(StatisticsDataReferenceCleaner.class.getName());
+ STATS_DATA_CLEANER.setDaemon(true);
+ STATS_DATA_CLEANER.start();
+ }
public Statistics(String scheme) {
this.scheme = scheme;
- this.rootData = new StatisticsData(null);
+ this.rootData = new StatisticsData();
this.threadData = new ThreadLocal<StatisticsData>();
- this.allData = null;
+ this.allData = new HashSet<StatisticsDataReference>();
}
/**
@@ -3020,7 +3030,7 @@ public abstract class FileSystem extends Configured implements Closeable {
*/
public Statistics(Statistics other) {
this.scheme = other.scheme;
- this.rootData = new StatisticsData(null);
+ this.rootData = new StatisticsData();
other.visitAll(new StatisticsAggregator<Void>() {
@Override
public void accept(StatisticsData data) {
@@ -3032,6 +3042,63 @@ public abstract class FileSystem extends Configured implements Closeable {
}
});
this.threadData = new ThreadLocal<StatisticsData>();
+ this.allData = new HashSet<StatisticsDataReference>();
+ }
+
+ /**
+ * A phantom reference to a thread that also includes the data associated
+ * with that thread. On the thread being garbage collected, it is enqueued
+ * to the reference queue for clean-up.
+ */
+ private class StatisticsDataReference extends PhantomReference<Thread> {
+ private final StatisticsData data;
+
+ public StatisticsDataReference(StatisticsData data, Thread thread) {
+ super(thread, STATS_DATA_REF_QUEUE);
+ this.data = data;
+ }
+
+ public StatisticsData getData() {
+ return data;
+ }
+
+ /**
+ * Performs clean-up action when the associated thread is garbage
+ * collected.
+ */
+ public void cleanUp() {
+ // use the statistics lock for safety
+ synchronized (Statistics.this) {
+ /*
+ * If the thread that created this thread-local data no longer exists,
+ * remove the StatisticsData from our list and fold the values into
+ * rootData.
+ */
+ rootData.add(data);
+ allData.remove(this);
+ }
+ }
+ }
+
+ /**
+ * Background action to act on references being removed.
+ */
+ private static class StatisticsDataReferenceCleaner implements Runnable {
+ @Override
+ public void run() {
+ while (true) {
+ try {
+ StatisticsDataReference ref =
+ (StatisticsDataReference)STATS_DATA_REF_QUEUE.remove();
+ ref.cleanUp();
+ } catch (Throwable th) {
+ // the cleaner thread should continue to run even if there are
+ // exceptions, including InterruptedException
+ LOG.warn("exception in the cleaner thread but it will continue to "
+ + "run", th);
+ }
+ }
+ }
}
/**
@@ -3040,14 +3107,12 @@ public abstract class FileSystem extends Configured implements Closeable {
public StatisticsData getThreadStatistics() {
StatisticsData data = threadData.get();
if (data == null) {
- data = new StatisticsData(
- new WeakReference<Thread>(Thread.currentThread()));
+ data = new StatisticsData();
threadData.set(data);
+ StatisticsDataReference ref =
+ new StatisticsDataReference(data, Thread.currentThread());
synchronized(this) {
- if (allData == null) {
- allData = new LinkedList<StatisticsData>();
- }
- allData.add(data);
+ allData.add(ref);
}
}
return data;
@@ -3105,21 +3170,9 @@ public abstract class FileSystem extends Configured implements Closeable {
*/
private synchronized <T> T visitAll(StatisticsAggregator<T> visitor) {
visitor.accept(rootData);
- if (allData != null) {
- for (Iterator<StatisticsData> iter = allData.iterator();
- iter.hasNext(); ) {
- StatisticsData data = iter.next();
- visitor.accept(data);
- if (data.owner.get() == null) {
- /*
- * If the thread that created this thread-local data no
- * longer exists, remove the StatisticsData from our list
- * and fold the values into rootData.
- */
- rootData.add(data);
- iter.remove();
- }
- }
+ for (StatisticsDataReference ref: allData) {
+ StatisticsData data = ref.getData();
+ visitor.accept(data);
}
return visitor.aggregate();
}
@@ -3226,7 +3279,7 @@ public abstract class FileSystem extends Configured implements Closeable {
@Override
public String toString() {
return visitAll(new StatisticsAggregator<String>() {
- private StatisticsData total = new StatisticsData(null);
+ private StatisticsData total = new StatisticsData();
@Override
public void accept(StatisticsData data) {
@@ -3259,7 +3312,7 @@ public abstract class FileSystem extends Configured implements Closeable {
*/
public void reset() {
visitAll(new StatisticsAggregator<Void>() {
- private StatisticsData total = new StatisticsData(null);
+ private StatisticsData total = new StatisticsData();
@Override
public void accept(StatisticsData data) {
@@ -3281,6 +3334,11 @@ public abstract class FileSystem extends Configured implements Closeable {
public String getScheme() {
return scheme;
}
+
+ @VisibleForTesting
+ synchronized int getAllThreadLocalDataSize() {
+ return allData.size();
+ }
}
/**
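
The core mechanism above — a PhantomReference pairing per-thread data with its owning thread, enqueued for a daemon cleaner once the thread becomes unreachable — reduces to a small standalone sketch. All names below are hypothetical stand-ins for StatisticsDataReference/StatisticsData, and note that System.gc() is only a hint:

import java.lang.ref.PhantomReference;
import java.lang.ref.ReferenceQueue;
import java.util.HashSet;
import java.util.Set;

public class PhantomCleanupDemo {
  static final ReferenceQueue<Thread> QUEUE = new ReferenceQueue<Thread>();
  // Keeps the reference objects strongly reachable, like allData above;
  // an unreachable Reference is never enqueued.
  static final Set<DataRef> ALL = new HashSet<DataRef>();

  // Pairs auxiliary data with its owning thread.
  static class DataRef extends PhantomReference<Thread> {
    final long[] data;
    DataRef(Thread owner, long[] data) {
      super(owner, QUEUE);
      this.data = data;
    }
  }

  public static void main(String[] args) throws Exception {
    Thread worker = new Thread();
    ALL.add(new DataRef(worker, new long[] {42}));
    worker.start();
    worker.join();
    worker = null;                       // drop the last strong reference
    System.gc();                         // request collection (best effort)
    DataRef dead = (DataRef) QUEUE.remove(10 * 1000);
    if (dead != null) {
      ALL.remove(dead);                  // mirrors cleanUp() in the diff,
      // which also folds the data into rootData under the Statistics lock
      System.out.println("reclaimed per-thread value: " + dead.data[0]);
    }
  }
}

Unlike the previous WeakReference approach, which only reclaimed dead threads' data when visitAll() happened to run, the queue-driven cleaner reclaims it as soon as the GC notices the thread is gone.
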
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e1bdc17/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
index 90337a6..3e33362 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java
@@ -18,26 +18,34 @@
package org.apache.hadoop.fs;
+import static org.apache.hadoop.fs.FileContextTestHelper.createFile;
+
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;
+import com.google.common.base.Supplier;
import com.google.common.util.concurrent.Uninterruptibles;
-import static org.apache.hadoop.fs.FileContextTestHelper.*;
-
/**
* <p>
* Base class to test {@link FileContext} Statistics.
* </p>
*/
public abstract class FCStatisticsBaseTest {
-
static protected int blockSize = 512;
static protected int numBlocks = 1;
@@ -102,6 +110,48 @@ public abstract class FCStatisticsBaseTest {
fc.delete(filePath, true);
}
+ @Test(timeout=60000)
+ public void testStatisticsThreadLocalDataCleanUp() throws Exception {
+ final Statistics stats = new Statistics("test");
+ // create a small thread pool to test the statistics
+ final int size = 2;
+ ExecutorService es = Executors.newFixedThreadPool(size);
+ List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>(size);
+ for (int i = 0; i < size; i++) {
+ tasks.add(new Callable<Boolean>() {
+ public Boolean call() {
+ // this populates the data set in statistics
+ stats.incrementReadOps(1);
+ return true;
+ }
+ });
+ }
+ // run the threads
+ es.invokeAll(tasks);
+ // assert that the data size is exactly the number of threads
+ final AtomicInteger allDataSize = new AtomicInteger(0);
+ allDataSize.set(stats.getAllThreadLocalDataSize());
+ Assert.assertEquals(size, allDataSize.get());
+ Assert.assertEquals(size, stats.getReadOps());
+ // force the GC to collect the threads by shutting down the thread pool
+ es.shutdownNow();
+ es.awaitTermination(1, TimeUnit.MINUTES);
+ es = null;
+ System.gc();
+
+ // wait for up to 10 seconds
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ int size = stats.getAllThreadLocalDataSize();
+ allDataSize.set(size);
+ return size == 0;
+ }
+ }, 1000, 10*1000);
+ Assert.assertEquals(0, allDataSize.get());
+ Assert.assertEquals(size, stats.getReadOps());
+ }
+
/**
* Bytes read may be different for different file systems. This method should
* throw assertion error if bytes read are incorrect.
[07/27] hadoop git commit: HDFS-8586. Dead Datanode is allocated for
write when client is from deadnode (Contributed by Brahma Reddy Battula)
Posted by aw...@apache.org.
HDFS-8586. Dead Datanode is allocated for write when client is from deadnode (Contributed by Brahma Reddy Battula)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88ceb382
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88ceb382
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88ceb382
Branch: refs/heads/HADOOP-12111
Commit: 88ceb382ef45bd09cf004cf44aedbabaf3976759
Parents: a95d39f
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Jun 29 15:25:03 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Jun 29 15:25:03 2015 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../BlockPlacementPolicyDefault.java | 3 +-
.../hdfs/server/namenode/TestDeadDatanode.java | 42 ++++++++++++++++++++
3 files changed, 47 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ceb382/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b89e10c..3800184 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -968,6 +968,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8546. Prune cached replicas from DatanodeDescriptor state on replica
invalidation. (wang)
+ HDFS-8586. Dead Datanode is allocated for write when client is from deadnode
+ (Brahma Reddy Battula via vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ceb382/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 21ad01d..9023e0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -454,7 +454,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
- if (preferLocalNode && localMachine instanceof DatanodeDescriptor) {
+ if (preferLocalNode && localMachine instanceof DatanodeDescriptor
+ && clusterMap.contains(localMachine)) {
DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
// otherwise try local machine first
if (excludedNodes.add(localMachine)) { // was not in the excluded list
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88ceb382/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index ff70c3f..c5262d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -18,9 +18,11 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.IOException;
+import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,6 +33,9 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -43,6 +48,7 @@ import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.net.Node;
import org.junit.After;
import org.junit.Test;
@@ -126,4 +132,40 @@ public class TestDeadDatanode {
assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
.getAction());
}
+
+ @Test
+ public void testDeadNodeAsBlockTarget() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
+ conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster.waitActive();
+
+ String poolId = cluster.getNamesystem().getBlockPoolId();
+ // wait for datanode to be marked live
+ DataNode dn = cluster.getDataNodes().get(0);
+ DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster
+ .getDataNodes().get(0), poolId);
+ // Get the updated datanode descriptor
+ BlockManager bm = cluster.getNamesystem().getBlockManager();
+ DatanodeManager dm = bm.getDatanodeManager();
+ Node clientNode = dm.getDatanode(reg);
+
+ DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
+ 20000);
+
+ // Shutdown and wait for datanode to be marked dead
+ dn.shutdown();
+ DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false,
+ 20000);
+ // Get the updated datanode descriptor available in DNM
+ // choose the targets, but local node should not get selected as this is not
+ // part of the cluster anymore
+ DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
+ clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7);
+ for (DatanodeStorageInfo datanodeStorageInfo : results) {
+ assertFalse("Dead node should not be choosen", datanodeStorageInfo
+ .getDatanodeDescriptor().equals(clientNode));
+ }
+ }
}
[21/27] hadoop git commit: HDFS-8493. Consolidate truncate() related
implementation in a single class. Contributed by Rakesh R.
Posted by aw...@apache.org.
HDFS-8493. Consolidate truncate() related implementation in a single class. Contributed by Rakesh R.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3797f9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3797f9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3797f9f
Branch: refs/heads/HADOOP-12111
Commit: d3797f9f3cf502b7bfee3b64c641807b276c6faf
Parents: 8e33372
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Jun 29 16:40:46 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Jun 29 16:45:35 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/server/namenode/FSDirTruncateOp.java | 361 +++++++++++++++++++
.../hdfs/server/namenode/FSDirectory.java | 95 -----
.../hdfs/server/namenode/FSEditLogLoader.java | 6 +-
.../hdfs/server/namenode/FSNamesystem.java | 237 ++----------
.../hdfs/server/namenode/TestFileTruncate.java | 8 +-
6 files changed, 402 insertions(+), 308 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3797f9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 108a6c0..3535f90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -685,6 +685,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
DatanodeStorageInfo. (Zhe Zhang via wang)
+ HDFS-8493. Consolidate truncate() related implementation in a single class.
+ (Rakesh R via wheat9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3797f9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
new file mode 100644
index 0000000..9fc9def
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -0,0 +1,361 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Helper class to perform truncate operation.
+ */
+final class FSDirTruncateOp {
+
+ /**
+ * Private constructor for preventing FSDirTruncateOp object creation.
+ * Static-only class.
+ */
+ private FSDirTruncateOp() {}
+
+ /**
+ * Truncate a file to a given size.
+ *
+ * @param fsn namespace
+ * @param srcArg path name
+ * @param newLength the target file size
+ * @param clientName client name
+ * @param clientMachine client machine info
+ * @param mtime modified time
+ * @param toRemoveBlocks to be removed blocks
+ * @param pc permission checker to check fs permission
+ * @return truncate result
+ * @throws IOException
+ */
+ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
+ final long newLength, final String clientName,
+ final String clientMachine, final long mtime,
+ final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc)
+ throws IOException, UnresolvedLinkException {
+ assert fsn.hasWriteLock();
+
+ FSDirectory fsd = fsn.getFSDirectory();
+ byte[][] pathComponents = FSDirectory
+ .getPathComponentsForReservedPath(srcArg);
+ final String src;
+ final INodesInPath iip;
+ final boolean onBlockBoundary;
+ Block truncateBlock = null;
+ fsd.writeLock();
+ try {
+ src = fsd.resolvePath(pc, srcArg, pathComponents);
+ iip = fsd.getINodesInPath4Write(src, true);
+ if (fsn.isPermissionEnabled()) {
+ fsd.checkPathAccess(pc, iip, FsAction.WRITE);
+ }
+ INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
+ final BlockStoragePolicy lpPolicy = fsn.getBlockManager()
+ .getStoragePolicy("LAZY_PERSIST");
+
+ if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
+ throw new UnsupportedOperationException(
+ "Cannot truncate lazy persist file " + src);
+ }
+
+ // Check if the file is already being truncated with the same length
+ final BlockInfo last = file.getLastBlock();
+ if (last != null && last.getBlockUCState()
+ == BlockUCState.UNDER_RECOVERY) {
+ final Block truncatedBlock = ((BlockInfoUnderConstruction) last)
+ .getTruncateBlock();
+ if (truncatedBlock != null) {
+ final long truncateLength = file.computeFileSize(false, false)
+ + truncatedBlock.getNumBytes();
+ if (newLength == truncateLength) {
+ return new TruncateResult(false, fsd.getAuditFileInfo(iip));
+ }
+ }
+ }
+
+ // Opening an existing file for truncate. May need lease recovery.
+ fsn.recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE, iip, src,
+ clientName, clientMachine, false);
+ // Truncate length check.
+ long oldLength = file.computeFileSize();
+ if (oldLength == newLength) {
+ return new TruncateResult(true, fsd.getAuditFileInfo(iip));
+ }
+ if (oldLength < newLength) {
+ throw new HadoopIllegalArgumentException(
+ "Cannot truncate to a larger file size. Current size: " + oldLength
+ + ", truncate size: " + newLength + ".");
+ }
+ // Perform INodeFile truncation.
+ final QuotaCounts delta = new QuotaCounts.Builder().build();
+ onBlockBoundary = unprotectedTruncate(fsn, iip, newLength,
+ toRemoveBlocks, mtime, delta);
+ if (!onBlockBoundary) {
+ // Open file for write, but don't log into edits
+ long lastBlockDelta = file.computeFileSize() - newLength;
+ assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
+ truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
+ clientMachine, lastBlockDelta, null);
+ }
+
+ // update the quota: use the preferred block size for UC block
+ fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
+ } finally {
+ fsd.writeUnlock();
+ }
+
+ fsn.getEditLog().logTruncate(src, clientName, clientMachine, newLength,
+ mtime, truncateBlock);
+ return new TruncateResult(onBlockBoundary, fsd.getAuditFileInfo(iip));
+ }
+
+ /**
+ * Unprotected truncate implementation. Unlike
+ * {@link FSDirTruncateOp#truncate}, this will not schedule block recovery.
+ *
+ * @param fsn namespace
+ * @param src path name
+ * @param clientName client name
+ * @param clientMachine client machine info
+ * @param newLength the target file size
+ * @param mtime modified time
+ * @param truncateBlock truncate block
+ * @throws IOException
+ */
+ static void unprotectedTruncate(final FSNamesystem fsn, final String src,
+ final String clientName, final String clientMachine,
+ final long newLength, final long mtime, final Block truncateBlock)
+ throws UnresolvedLinkException, QuotaExceededException,
+ SnapshotAccessControlException, IOException {
+ assert fsn.hasWriteLock();
+
+ FSDirectory fsd = fsn.getFSDirectory();
+ INodesInPath iip = fsd.getINodesInPath(src, true);
+ INodeFile file = iip.getLastINode().asFile();
+ BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+ boolean onBlockBoundary = unprotectedTruncate(fsn, iip, newLength,
+ collectedBlocks, mtime, null);
+
+ if (!onBlockBoundary) {
+ BlockInfo oldBlock = file.getLastBlock();
+ Block tBlk = prepareFileForTruncate(fsn, iip, clientName, clientMachine,
+ file.computeFileSize() - newLength, truncateBlock);
+ assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) &&
+ tBlk.getNumBytes() == truncateBlock.getNumBytes() :
+ "Should be the same block.";
+ if (oldBlock.getBlockId() != tBlk.getBlockId()
+ && !file.isBlockInLatestSnapshot(oldBlock)) {
+ fsn.getBlockManager().removeBlockFromMap(oldBlock);
+ }
+ }
+ assert onBlockBoundary == (truncateBlock == null) :
+ "truncateBlock is null iff on block boundary: " + truncateBlock;
+ fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
+ }
+
+ /**
+ * Convert current INode to UnderConstruction. Recreate lease. Create new
+ * block for the truncated copy. Schedule truncation of the replicas.
+ *
+ * @param fsn namespace
+ * @param iip inodes in the path containing the file
+ * @param leaseHolder lease holder
+ * @param clientMachine client machine info
+ * @param lastBlockDelta last block delta size
+ * @param newBlock new block
+ * @return the returned block will be written to editLog and passed back
+ * into this method upon loading.
+ * @throws IOException
+ */
+ @VisibleForTesting
+ static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip,
+ String leaseHolder, String clientMachine, long lastBlockDelta,
+ Block newBlock) throws IOException {
+ assert fsn.hasWriteLock();
+
+ INodeFile file = iip.getLastINode().asFile();
+ file.recordModification(iip.getLatestSnapshotId());
+ file.toUnderConstruction(leaseHolder, clientMachine);
+ assert file.isUnderConstruction() : "inode should be under construction.";
+ fsn.getLeaseManager().addLease(
+ file.getFileUnderConstructionFeature().getClientName(), file.getId());
+ boolean shouldRecoverNow = (newBlock == null);
+ BlockInfo oldBlock = file.getLastBlock();
+ boolean shouldCopyOnTruncate = shouldCopyOnTruncate(fsn, file, oldBlock);
+ if (newBlock == null) {
+ newBlock = (shouldCopyOnTruncate) ? fsn.createNewBlock() : new Block(
+ oldBlock.getBlockId(), oldBlock.getNumBytes(),
+ fsn.nextGenerationStamp(fsn.getBlockIdManager().isLegacyBlock(
+ oldBlock)));
+ }
+
+ BlockInfoUnderConstruction truncatedBlockUC;
+ if (shouldCopyOnTruncate) {
+ // Add new truncateBlock into blocksMap and
+ // use oldBlock as a source for copy-on-truncate recovery
+ truncatedBlockUC = new BlockInfoUnderConstructionContiguous(newBlock,
+ file.getPreferredBlockReplication());
+ truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
+ truncatedBlockUC.setTruncateBlock(oldBlock);
+ file.setLastBlock(truncatedBlockUC,
+ fsn.getBlockManager().getStorages(oldBlock));
+ fsn.getBlockManager().addBlockCollection(truncatedBlockUC, file);
+
+ NameNode.stateChangeLog.debug(
+ "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new"
+ + " size {} new block {} old block {}",
+ truncatedBlockUC.getNumBytes(), newBlock,
+ truncatedBlockUC.getTruncateBlock());
+ } else {
+ // Use new generation stamp for in-place truncate recovery
+ fsn.getBlockManager().convertLastBlockToUnderConstruction(file,
+ lastBlockDelta);
+ oldBlock = file.getLastBlock();
+ assert !oldBlock.isComplete() : "oldBlock should be under construction";
+ truncatedBlockUC = (BlockInfoUnderConstruction) oldBlock;
+ truncatedBlockUC.setTruncateBlock(new Block(oldBlock));
+ truncatedBlockUC.getTruncateBlock().setNumBytes(
+ oldBlock.getNumBytes() - lastBlockDelta);
+ truncatedBlockUC.getTruncateBlock().setGenerationStamp(
+ newBlock.getGenerationStamp());
+
+ NameNode.stateChangeLog.debug(
+ "BLOCK* prepareFileForTruncate: {} Scheduling in-place block "
+ + "truncate to new size {}", truncatedBlockUC.getTruncateBlock()
+ .getNumBytes(), truncatedBlockUC);
+ }
+ if (shouldRecoverNow) {
+ truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp());
+ }
+
+ return newBlock;
+ }
+
+ /**
+ * Truncate has the following properties:
+ * 1.) Any block deletions occur now.
+ * 2.) INode length is truncated now - new clients can only read up to
+ * the truncated length.
+ * 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY.
+ * 4.) NN will trigger DN truncation recovery and waits for DNs to report.
+ * 5.) File is considered UNDER_RECOVERY until truncation recovery
+ * completes.
+ * 6.) Soft and hard Lease expiration require truncation recovery to
+ * complete.
+ *
+ * @return true if on the block boundary or false if recovery is need
+ */
+ private static boolean unprotectedTruncate(FSNamesystem fsn,
+ INodesInPath iip, long newLength, BlocksMapUpdateInfo collectedBlocks,
+ long mtime, QuotaCounts delta) throws IOException {
+ assert fsn.hasWriteLock();
+
+ INodeFile file = iip.getLastINode().asFile();
+ int latestSnapshot = iip.getLatestSnapshotId();
+ file.recordModification(latestSnapshot, true);
+
+ verifyQuotaForTruncate(fsn, iip, file, newLength, delta);
+
+ long remainingLength =
+ file.collectBlocksBeyondMax(newLength, collectedBlocks);
+ file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks);
+ file.setModificationTime(mtime);
+ // return whether on a block boundary
+ return (remainingLength - newLength) == 0;
+ }
+
+ private static void verifyQuotaForTruncate(FSNamesystem fsn,
+ INodesInPath iip, INodeFile file, long newLength, QuotaCounts delta)
+ throws QuotaExceededException {
+ FSDirectory fsd = fsn.getFSDirectory();
+ if (!fsn.isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
+ // Do not check quota if edit log is still being processed
+ return;
+ }
+ final BlockStoragePolicy policy = fsd.getBlockStoragePolicySuite()
+ .getPolicy(file.getStoragePolicyID());
+ file.computeQuotaDeltaForTruncate(newLength, policy, delta);
+ fsd.readLock();
+ try {
+ FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
+ } finally {
+ fsd.readUnlock();
+ }
+ }
+
+ /**
+ * Defines if a replica needs to be copied on truncate or
+ * can be truncated in place.
+ */
+ private static boolean shouldCopyOnTruncate(FSNamesystem fsn, INodeFile file,
+ BlockInfo blk) {
+ if (!fsn.isUpgradeFinalized()) {
+ return true;
+ }
+ if (fsn.isRollingUpgrade()) {
+ return true;
+ }
+ return file.isBlockInLatestSnapshot(blk);
+ }
+
+ /**
+ * Result of truncate operation.
+ */
+ static class TruncateResult {
+ private final boolean result;
+ private final HdfsFileStatus stat;
+
+ public TruncateResult(boolean result, HdfsFileStatus stat) {
+ this.result = result;
+ this.stat = stat;
+ }
+
+ /**
+ * @return true if client does not need to wait for block recovery,
+ * false if client needs to wait for block recovery.
+ */
+ boolean getResult() {
+ return result;
+ }
+
+ /**
+ * @return file information.
+ */
+ HdfsFileStatus getFileStatus() {
+ return stat;
+ }
+ }
+}
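
From the client side, the contract spelled out in the javadoc above surfaces through FileSystem#truncate. A minimal, hypothetical sketch against a filesystem that supports truncate, such as HDFS (path and length are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration()); // assumes HDFS
    Path p = new Path("/demo/file");
    // true: new length falls on a block boundary, file is usable at once;
    // false: block recovery is in progress and the client must wait for
    // the file to leave UNDER_RECOVERY before reopening it for write.
    boolean onBoundary = fs.truncate(p, 512L);
    if (!onBoundary) {
      // poll or retry until recovery completes
    }
  }
}
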
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3797f9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c807fba..ccee1ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
@@ -49,11 +48,9 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.util.ByteArray;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.security.AccessControlException;
@@ -908,98 +905,6 @@ public class FSDirectory implements Closeable {
}
/**
- * FSEditLogLoader implementation.
- * Unlike FSNamesystem.truncate, this will not schedule block recovery.
- */
- void unprotectedTruncate(String src, String clientName, String clientMachine,
- long newLength, long mtime, Block truncateBlock)
- throws UnresolvedLinkException, QuotaExceededException,
- SnapshotAccessControlException, IOException {
- INodesInPath iip = getINodesInPath(src, true);
- INodeFile file = iip.getLastINode().asFile();
- BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
- boolean onBlockBoundary =
- unprotectedTruncate(iip, newLength, collectedBlocks, mtime, null);
-
- if(! onBlockBoundary) {
- BlockInfo oldBlock = file.getLastBlock();
- Block tBlk =
- getFSNamesystem().prepareFileForTruncate(iip,
- clientName, clientMachine, file.computeFileSize() - newLength,
- truncateBlock);
- assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) &&
- tBlk.getNumBytes() == truncateBlock.getNumBytes() :
- "Should be the same block.";
- if(oldBlock.getBlockId() != tBlk.getBlockId() &&
- !file.isBlockInLatestSnapshot(oldBlock)) {
- getBlockManager().removeBlockFromMap(oldBlock);
- }
- }
- assert onBlockBoundary == (truncateBlock == null) :
- "truncateBlock is null iff on block boundary: " + truncateBlock;
- getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
- }
-
- boolean truncate(INodesInPath iip, long newLength,
- BlocksMapUpdateInfo collectedBlocks,
- long mtime, QuotaCounts delta)
- throws IOException {
- writeLock();
- try {
- return unprotectedTruncate(iip, newLength, collectedBlocks, mtime, delta);
- } finally {
- writeUnlock();
- }
- }
-
- /**
- * Truncate has the following properties:
- * 1.) Any block deletions occur now.
- * 2.) INode length is truncated now – new clients can only read up to
- * the truncated length.
- * 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY.
- * 4.) NN will trigger DN truncation recovery and waits for DNs to report.
- * 5.) File is considered UNDER_RECOVERY until truncation recovery completes.
- * 6.) Soft and hard Lease expiration require truncation recovery to complete.
- *
- * @return true if on the block boundary or false if recovery is need
- */
- boolean unprotectedTruncate(INodesInPath iip, long newLength,
- BlocksMapUpdateInfo collectedBlocks,
- long mtime, QuotaCounts delta) throws IOException {
- assert hasWriteLock();
- INodeFile file = iip.getLastINode().asFile();
- int latestSnapshot = iip.getLatestSnapshotId();
- file.recordModification(latestSnapshot, true);
-
- verifyQuotaForTruncate(iip, file, newLength, delta);
-
- long remainingLength =
- file.collectBlocksBeyondMax(newLength, collectedBlocks);
- file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks);
- file.setModificationTime(mtime);
- // return whether on a block boundary
- return (remainingLength - newLength) == 0;
- }
-
- private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
- long newLength, QuotaCounts delta) throws QuotaExceededException {
- if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
- // Do not check quota if edit log is still being processed
- return;
- }
- final BlockStoragePolicy policy = getBlockStoragePolicySuite()
- .getPolicy(file.getStoragePolicyID());
- file.computeQuotaDeltaForTruncate(newLength, policy, delta);
- readLock();
- try {
- verifyQuota(iip, iip.length() - 1, delta, null);
- } finally {
- readUnlock();
- }
- }
-
- /**
* This method is always called with writeLock of FSDirectory held.
*/
public final void addToInodeMap(INode inode) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3797f9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index df01edd..63ef985 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -901,9 +901,9 @@ public class FSEditLogLoader {
}
case OP_TRUNCATE: {
TruncateOp truncateOp = (TruncateOp) op;
- fsDir.unprotectedTruncate(truncateOp.src, truncateOp.clientName,
- truncateOp.clientMachine, truncateOp.newLength, truncateOp.timestamp,
- truncateOp.truncateBlock);
+ FSDirTruncateOp.unprotectedTruncate(fsNamesys, truncateOp.src,
+ truncateOp.clientName, truncateOp.clientMachine,
+ truncateOp.newLength, truncateOp.timestamp, truncateOp.truncateBlock);
break;
}
case OP_SET_STORAGE_POLICY: {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3797f9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e95007b..7c6d6a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -201,7 +201,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -1831,218 +1830,44 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* block recovery to truncate the last block of the file.
*
* @return true if client does not need to wait for block recovery,
- * false if client needs to wait for block recovery.
+ * false if client needs to wait for block recovery.
*/
- boolean truncate(String src, long newLength,
- String clientName, String clientMachine,
- long mtime)
- throws IOException, UnresolvedLinkException {
+ boolean truncate(String src, long newLength, String clientName,
+ String clientMachine, long mtime) throws IOException,
+ UnresolvedLinkException {
+
requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
- boolean ret;
+ final FSDirTruncateOp.TruncateResult r;
try {
- ret = truncateInt(src, newLength, clientName, clientMachine, mtime);
+ NameNode.stateChangeLog.debug(
+ "DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
+ if (newLength < 0) {
+ throw new HadoopIllegalArgumentException(
+ "Cannot truncate to a negative file size: " + newLength + ".");
+ }
+ final FSPermissionChecker pc = getPermissionChecker();
+ checkOperation(OperationCategory.WRITE);
+ writeLock();
+ BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
+ try {
+ checkOperation(OperationCategory.WRITE);
+ checkNameNodeSafeMode("Cannot truncate for " + src);
+ r = FSDirTruncateOp.truncate(this, src, newLength, clientName,
+ clientMachine, mtime, toRemoveBlocks, pc);
+ } finally {
+ writeUnlock();
+ }
+ getEditLog().logSync();
+ if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
+ removeBlocks(toRemoveBlocks);
+ toRemoveBlocks.clear();
+ }
+ logAuditEvent(true, "truncate", src, null, r.getFileStatus());
} catch (AccessControlException e) {
logAuditEvent(false, "truncate", src);
throw e;
}
- return ret;
- }
-
- boolean truncateInt(String srcArg, long newLength,
- String clientName, String clientMachine,
- long mtime)
- throws IOException, UnresolvedLinkException {
- String src = srcArg;
- NameNode.stateChangeLog.debug(
- "DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
- if (newLength < 0) {
- throw new HadoopIllegalArgumentException(
- "Cannot truncate to a negative file size: " + newLength + ".");
- }
- HdfsFileStatus stat = null;
- FSPermissionChecker pc = getPermissionChecker();
- checkOperation(OperationCategory.WRITE);
- boolean res;
- byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
- writeLock();
- BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
- try {
- checkOperation(OperationCategory.WRITE);
- checkNameNodeSafeMode("Cannot truncate for " + src);
- src = dir.resolvePath(pc, src, pathComponents);
- res = truncateInternal(src, newLength, clientName,
- clientMachine, mtime, pc, toRemoveBlocks);
- stat = dir.getAuditFileInfo(dir.getINodesInPath4Write(src, false));
- } finally {
- writeUnlock();
- }
- getEditLog().logSync();
- if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
- removeBlocks(toRemoveBlocks);
- toRemoveBlocks.clear();
- }
- logAuditEvent(true, "truncate", src, null, stat);
- return res;
- }
-
- /**
- * Truncate a file to a given size
- * Update the count at each ancestor directory with quota
- */
- boolean truncateInternal(String src, long newLength,
- String clientName, String clientMachine,
- long mtime, FSPermissionChecker pc,
- BlocksMapUpdateInfo toRemoveBlocks)
- throws IOException, UnresolvedLinkException {
- assert hasWriteLock();
- INodesInPath iip = dir.getINodesInPath4Write(src, true);
- if (isPermissionEnabled) {
- dir.checkPathAccess(pc, iip, FsAction.WRITE);
- }
- INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
- final BlockStoragePolicy lpPolicy =
- blockManager.getStoragePolicy("LAZY_PERSIST");
-
- if (lpPolicy != null &&
- lpPolicy.getId() == file.getStoragePolicyID()) {
- throw new UnsupportedOperationException(
- "Cannot truncate lazy persist file " + src);
- }
-
- // Check if the file is already being truncated with the same length
- final BlockInfo last = file.getLastBlock();
- if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
- final Block truncateBlock
- = ((BlockInfoUnderConstruction)last).getTruncateBlock();
- if (truncateBlock != null) {
- final long truncateLength = file.computeFileSize(false, false)
- + truncateBlock.getNumBytes();
- if (newLength == truncateLength) {
- return false;
- }
- }
- }
-
- // Opening an existing file for truncate. May need lease recovery.
- recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE,
- iip, src, clientName, clientMachine, false);
- // Truncate length check.
- long oldLength = file.computeFileSize();
- if(oldLength == newLength) {
- return true;
- }
- if(oldLength < newLength) {
- throw new HadoopIllegalArgumentException(
- "Cannot truncate to a larger file size. Current size: " + oldLength +
- ", truncate size: " + newLength + ".");
- }
- // Perform INodeFile truncation.
- final QuotaCounts delta = new QuotaCounts.Builder().build();
- boolean onBlockBoundary = dir.truncate(iip, newLength, toRemoveBlocks,
- mtime, delta);
- Block truncateBlock = null;
- if(!onBlockBoundary) {
- // Open file for write, but don't log into edits
- long lastBlockDelta = file.computeFileSize() - newLength;
- assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
- truncateBlock = prepareFileForTruncate(iip, clientName, clientMachine,
- lastBlockDelta, null);
- }
-
- // update the quota: use the preferred block size for UC block
- dir.writeLock();
- try {
- dir.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
- } finally {
- dir.writeUnlock();
- }
-
- getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime,
- truncateBlock);
- return onBlockBoundary;
- }
-
- /**
- * Convert current INode to UnderConstruction.
- * Recreate lease.
- * Create new block for the truncated copy.
- * Schedule truncation of the replicas.
- *
- * @return the returned block will be written to editLog and passed back into
- * this method upon loading.
- */
- Block prepareFileForTruncate(INodesInPath iip,
- String leaseHolder,
- String clientMachine,
- long lastBlockDelta,
- Block newBlock)
- throws IOException {
- INodeFile file = iip.getLastINode().asFile();
- file.recordModification(iip.getLatestSnapshotId());
- file.toUnderConstruction(leaseHolder, clientMachine);
- assert file.isUnderConstruction() : "inode should be under construction.";
- leaseManager.addLease(
- file.getFileUnderConstructionFeature().getClientName(), file.getId());
- boolean shouldRecoverNow = (newBlock == null);
- BlockInfo oldBlock = file.getLastBlock();
- boolean shouldCopyOnTruncate = shouldCopyOnTruncate(file, oldBlock);
- if(newBlock == null) {
- newBlock = (shouldCopyOnTruncate) ? createNewBlock() :
- new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),
- nextGenerationStamp(blockIdManager.isLegacyBlock(oldBlock)));
- }
-
- BlockInfoUnderConstruction truncatedBlockUC;
- if(shouldCopyOnTruncate) {
- // Add new truncateBlock into blocksMap and
- // use oldBlock as a source for copy-on-truncate recovery
- truncatedBlockUC = new BlockInfoUnderConstructionContiguous(newBlock,
- file.getPreferredBlockReplication());
- truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
- truncatedBlockUC.setTruncateBlock(oldBlock);
- file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
- getBlockManager().addBlockCollection(truncatedBlockUC, file);
-
- NameNode.stateChangeLog.debug(
- "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" +
- " size {} new block {} old block {}", truncatedBlockUC.getNumBytes(),
- newBlock, truncatedBlockUC.getTruncateBlock());
- } else {
- // Use new generation stamp for in-place truncate recovery
- blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
- oldBlock = file.getLastBlock();
- assert !oldBlock.isComplete() : "oldBlock should be under construction";
- truncatedBlockUC = (BlockInfoUnderConstruction) oldBlock;
- truncatedBlockUC.setTruncateBlock(new Block(oldBlock));
- truncatedBlockUC.getTruncateBlock().setNumBytes(
- oldBlock.getNumBytes() - lastBlockDelta);
- truncatedBlockUC.getTruncateBlock().setGenerationStamp(
- newBlock.getGenerationStamp());
-
- NameNode.stateChangeLog.debug(
- "BLOCK* prepareFileForTruncate: {} Scheduling in-place block " +
- "truncate to new size {}",
- truncatedBlockUC.getTruncateBlock().getNumBytes(), truncatedBlockUC);
- }
- if (shouldRecoverNow) {
- truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp());
- }
-
- return newBlock;
- }
-
- /**
- * Defines if a replica needs to be copied on truncate or
- * can be truncated in place.
- */
- boolean shouldCopyOnTruncate(INodeFile file, BlockInfo blk) {
- if(!isUpgradeFinalized()) {
- return true;
- }
- if (isRollingUpgrade()) {
- return true;
- }
- return file.isBlockInLatestSnapshot(blk);
+ return r.getResult();
}
/**
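The boolean these methods thread through is what FileSystem#truncate ultimately hands back to clients: true when the new length falls on a block boundary, false when the last block must first go through truncation recovery. A minimal client-side sketch, assuming an HDFS default filesystem (the path and length are illustrative, not from this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class TruncateWait {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path f = new Path("/tmp/data");              // illustrative path
        boolean onBoundary = dfs.truncate(f, 1024L);
        if (!onBoundary) {
          // Last block is UNDER_RECOVERY; the NameNode closes the file
          // once the DataNodes finish truncation recovery.
          while (!dfs.isFileClosed(f)) {
            Thread.sleep(300);
          }
        }
      }
    }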
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3797f9f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index df920e0..e0f9ad2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -1008,8 +1008,8 @@ public class TestFileTruncate {
fsn.writeLock();
try {
Block oldBlock = file.getLastBlock();
- Block truncateBlock =
- fsn.prepareFileForTruncate(iip, client, clientMachine, 1, null);
+ Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
+ client, clientMachine, 1, null);
// In-place truncate uses old block id with new genStamp.
assertThat(truncateBlock.getBlockId(),
is(equalTo(oldBlock.getBlockId())));
@@ -1041,8 +1041,8 @@ public class TestFileTruncate {
fsn.writeLock();
try {
Block oldBlock = file.getLastBlock();
- Block truncateBlock =
- fsn.prepareFileForTruncate(iip, client, clientMachine, 1, null);
+ Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
+ client, clientMachine, 1, null);
// Copy-on-write truncate makes new block with new id and genStamp
assertThat(truncateBlock.getBlockId(),
is(not(equalTo(oldBlock.getBlockId()))));
[16/27] hadoop git commit: YARN-3770. SerializedException should also handle java.lang.Error on de-serialization. Contributed by Lavkesh Lahngir
Posted by aw...@apache.org.
YARN-3770. SerializedException should also handle java.lang.Error on de-serialization. Contributed by Lavkesh Lahngir
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4672315e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4672315e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4672315e
Branch: refs/heads/HADOOP-12111
Commit: 4672315e2d6abe1cee0210cf7d3e8ab114ba933c
Parents: 460e98f
Author: Jian He <ji...@apache.org>
Authored: Mon Jun 29 14:31:32 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Jun 29 14:31:49 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../api/records/impl/pb/SerializedExceptionPBImpl.java | 2 +-
.../records/impl/pb/TestSerializedExceptionPBImpl.java | 12 ++++++++++--
3 files changed, 14 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4672315e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2fdcc9d..8461f69 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -571,6 +571,9 @@ Release 2.8.0 - UNRELEASED
YARN-3695. ServerProxy (NMProxy, etc.) shouldn't retry forever for non
network exception. (Raju Bairishetti via jianhe)
+ YARN-3770. SerializedException should also handle java.lang.Error on
+ de-serialization. (Lavkesh Lahngir via jianhe)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4672315e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SerializedExceptionPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SerializedExceptionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SerializedExceptionPBImpl.java
index fd9e170..2ec232e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SerializedExceptionPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SerializedExceptionPBImpl.java
@@ -101,7 +101,7 @@ public class SerializedExceptionPBImpl extends SerializedException {
} else if (RuntimeException.class.isAssignableFrom(realClass)) {
classType = RuntimeException.class;
} else {
- classType = Exception.class;
+ classType = Throwable.class;
}
return instantiateException(realClass.asSubclass(classType), getMessage(),
cause == null ? null : cause.deSerialize());
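The one-word fix matters because Error is not a subclass of Exception, so the old fallback made realClass.asSubclass(classType) throw ClassCastException for any java.lang.Error arriving off the wire. A round-trip sketch mirroring the new test below (the StackOverflowError is just an illustrative Error):

    import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;

    public class ErrorRoundTrip {
      public static void main(String[] args) {
        SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
        pb.init(new StackOverflowError("worker died"));  // serialize an Error
        Throwable t = pb.deSerialize();    // succeeds with the Throwable fallback
        System.out.println(t.getClass());  // class java.lang.StackOverflowError
      }
    }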
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4672315e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestSerializedExceptionPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestSerializedExceptionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestSerializedExceptionPBImpl.java
index ac7e40e..ecfa63e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestSerializedExceptionPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestSerializedExceptionPBImpl.java
@@ -20,10 +20,9 @@ package org.apache.hadoop.yarn.api.records.impl.pb;
import java.nio.channels.ClosedChannelException;
-import org.junit.Assert;
-import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto;
+import org.junit.Assert;
import org.junit.Test;
public class TestSerializedExceptionPBImpl {
@@ -79,4 +78,13 @@ public class TestSerializedExceptionPBImpl {
SerializedExceptionPBImpl pb3 = new SerializedExceptionPBImpl();
Assert.assertEquals(defaultProto.getTrace(), pb3.getRemoteTrace());
}
+
+ @Test
+ public void testThrowableDeserialization() {
+ // java.lang.Error should also be serializable
+ Error ex = new Error();
+ SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
+ pb.init(ex);
+ Assert.assertEquals(ex.getClass(), pb.deSerialize().getClass());
+ }
}
[08/27] hadoop git commit: HADOOP-12119. hadoop fs -expunge does not work for federated namespace (Contributed by J.Andreina)
Posted by aw...@apache.org.
HADOOP-12119. hadoop fs -expunge does not work for federated namespace (Contributed by J.Andreina)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c815344e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c815344e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c815344e
Branch: refs/heads/HADOOP-12111
Commit: c815344e2e68d78f6587b65bc2db25e151aa4364
Parents: 88ceb38
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Jun 29 15:58:54 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Jun 29 15:58:54 2015 +0530
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/fs/shell/Delete.java | 17 ++++++++++++++---
.../test/java/org/apache/hadoop/fs/TestTrash.java | 14 ++++++++++++--
3 files changed, 29 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c815344e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 219ef25..0a964a3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -884,6 +884,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12076. Incomplete Cache Mechanism in CredentialProvider API.
(Larry McCay via cnauroth)
+ HADOOP-12119. hadoop fs -expunge does not work for federated namespace
+ (J.Andreina via vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c815344e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
index f882817..40d9478 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
@@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathIsDirectoryException;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
@@ -195,9 +196,19 @@ class Delete {
@Override
protected void processArguments(LinkedList<PathData> args)
throws IOException {
- Trash trash = new Trash(getConf());
- trash.expunge();
- trash.checkpoint();
+ FileSystem[] childFileSystems =
+ FileSystem.get(getConf()).getChildFileSystems();
+ if (null != childFileSystems) {
+ for (FileSystem fs : childFileSystems) {
+ Trash trash = new Trash(fs, getConf());
+ trash.expunge();
+ trash.checkpoint();
+ }
+ } else {
+ Trash trash = new Trash(getConf());
+ trash.expunge();
+ trash.checkpoint();
+ }
}
}
}
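getChildFileSystems() is what makes the new branch federation-aware: the base FileSystem implementation returns null (hence the unchanged fallback), while ViewFileSystem returns the filesystem behind each mount point, so each namespace gets its own Trash. A standalone sketch of the same loop, assuming an illustrative viewfs mount table (names and URIs are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Trash;

    public class ExpungeAll {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "viewfs://cluster/");
        conf.set("fs.viewfs.mounttable.cluster.link./user", "hdfs://nn1/user");
        conf.set("fs.viewfs.mounttable.cluster.link./data", "hdfs://nn2/data");

        FileSystem[] children = FileSystem.get(conf).getChildFileSystems();
        if (children != null) {            // viewfs: one trash per namespace
          for (FileSystem child : children) {
            Trash trash = new Trash(child, conf);
            trash.expunge();
            trash.checkpoint();
          }
        }
      }
    }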
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c815344e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index a675e30..9a91733 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -594,8 +594,18 @@ public class TestTrash extends TestCase {
TestLFS() {
this(new Path(TEST_DIR, "user/test"));
}
- TestLFS(Path home) {
- super();
+ TestLFS(final Path home) {
+ super(new RawLocalFileSystem() {
+ @Override
+ protected Path getInitialWorkingDirectory() {
+ return makeQualified(home);
+ }
+
+ @Override
+ public Path getHomeDirectory() {
+ return makeQualified(home);
+ }
+ });
this.home = home;
}
@Override
[25/27] hadoop git commit: Revert "HADOOP-12009 Clarify FileSystem.listStatus() sorting order & fix FileSystemContractBaseTest:testListStatus. (J.Andreina via stevel)"
Posted by aw...@apache.org.
Revert "HADOOP-12009 Clarify FileSystem.listStatus() sorting order & fix FileSystemContractBaseTest:testListStatus. (J.Andreina via stevel)"
This reverts commit 3dfa8161f9412bcb040f3c29c471344f25f24337.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/076948d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/076948d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/076948d9
Branch: refs/heads/HADOOP-12111
Commit: 076948d9a4053cc8be1927005c797273bae85e99
Parents: 7ba5bba
Author: Steve Loughran <st...@apache.org>
Authored: Tue Jun 30 11:54:12 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Jun 30 11:54:12 2015 +0100
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 4 ----
.../main/java/org/apache/hadoop/fs/FileSystem.java | 17 +----------------
.../src/site/markdown/filesystem/filesystem.md | 4 ----
.../hadoop/fs/FileSystemContractBaseTest.java | 11 +++--------
4 files changed, 4 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/076948d9/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2cf9509..c010ff1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -664,10 +664,6 @@ Release 2.8.0 - UNRELEASED
HADOOP-11958. MetricsSystemImpl fails to show backtrace when an error
occurs (Jason Lowe via jeagles)
-
- HADOOP-12009 Clarify FileSystem.listStatus() sorting order & fix
- FileSystemContractBaseTest:testListStatus. (J.Andreina via stevel)
-
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/076948d9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index c98074a..c73caf7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1498,9 +1498,7 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
- * <p>
- * Does not guarantee to return the List of files/directories status in a
- * sorted order.
+ *
* @param f given path
* @return the statuses of the files/directories in the given patch
* @throws FileNotFoundException when the path does not exist;
@@ -1542,9 +1540,6 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Filter files/directories in the given path using the user-supplied path
* filter.
- * <p>
- * Does not guarantee to return the List of files/directories status in a
- * sorted order.
*
* @param f
* a path name
@@ -1565,9 +1560,6 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Filter files/directories in the given list of paths using default
* path filter.
- * <p>
- * Does not guarantee to return the List of files/directories status in a
- * sorted order.
*
* @param files
* a list of paths
@@ -1584,9 +1576,6 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Filter files/directories in the given list of paths using user-supplied
* path filter.
- * <p>
- * Does not guarantee to return the List of files/directories status in a
- * sorted order.
*
* @param files
* a list of paths
@@ -1747,8 +1736,6 @@ public abstract class FileSystem extends Configured implements Closeable {
* while consuming the entries. Each file system implementation should
* override this method and provide a more efficient implementation, if
* possible.
- * Does not guarantee to return the iterator that traverses statuses
- * of the files in a sorted order.
*
* @param p target path
* @return remote iterator
@@ -1776,8 +1763,6 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* List the statuses and block locations of the files in the given path.
- * Does not guarantee to return the iterator that traverses statuses
- * of the files in a sorted order.
*
* If the path is a directory,
* if recursive is false, returns files in the directory;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/076948d9/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index f323374..84e3755 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -183,10 +183,6 @@ to the same path:
forall fs in listStatus(Path) :
fs == getFileStatus(fs.path)
-**Ordering of results**: there is no guarantee of ordering of the listed entries.
-While HDFS currently returns an alphanumerically sorted list, neither the Posix `readdir()`
-nor Java's `File.listFiles()` API calls define any ordering of returned values. Applications
-which require a uniform sort order on the results must perform the sorting themselves.
### Atomicity and Consistency
http://git-wip-us.apache.org/repos/asf/hadoop/blob/076948d9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 495af00..2ca81e9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.util.ArrayList;
import junit.framework.TestCase;
@@ -225,13 +224,9 @@ public abstract class FileSystemContractBaseTest extends TestCase {
paths = fs.listStatus(path("/test/hadoop"));
assertEquals(3, paths.length);
- ArrayList<String> list = new ArrayList<String>();
- for (FileStatus fileState : paths) {
- list.add(fileState.getPath().toString());
- }
- assertTrue(list.contains(path("/test/hadoop/a")));
- assertTrue(list.contains(path("/test/hadoop/b")));
- assertTrue(list.contains(path("/test/hadoop/c")));
+ assertEquals(path("/test/hadoop/a"), paths[0].getPath());
+ assertEquals(path("/test/hadoop/b"), paths[1].getPath());
+ assertEquals(path("/test/hadoop/c"), paths[2].getPath());
paths = fs.listStatus(path("/test/hadoop/a"));
assertEquals(0, paths.length);
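The reverted wording had advised applications that need a uniform order to sort results themselves, while the restored assertions rely on HDFS returning an alphanumerically sorted listing. Portable code can stay safe either way, since FileStatus is Comparable by path; a minimal sketch:

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SortedListing {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus[] entries = fs.listStatus(new Path("/test/hadoop"));
        Arrays.sort(entries);              // FileStatus compares by path
        for (FileStatus st : entries) {
          System.out.println(st.getPath());
        }
      }
    }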
[10/27] hadoop git commit: HDFS-7390. Provide JMX metrics per storage type. (Benoy Antony)
Posted by aw...@apache.org.
HDFS-7390. Provide JMX metrics per storage type. (Benoy Antony)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3fed8e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3fed8e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3fed8e6
Branch: refs/heads/HADOOP-12111
Commit: d3fed8e653ed9e18d3a29a11c4b24a628ac770bb
Parents: fde20ff
Author: Benoy Antony <be...@apache.org>
Authored: Mon Jun 29 11:00:22 2015 -0700
Committer: Benoy Antony <be...@apache.org>
Committed: Mon Jun 29 11:00:22 2015 -0700
----------------------------------------------------------------------
.../server/blockmanagement/BlockManager.java | 15 +-
.../blockmanagement/BlockStatsMXBean.java | 36 +++++
.../blockmanagement/DatanodeStatistics.java | 6 +
.../blockmanagement/HeartbeatManager.java | 96 +++++++++++-
.../blockmanagement/StorageTypeStats.java | 115 +++++++++++++++
.../blockmanagement/TestBlockStatsMXBean.java | 146 +++++++++++++++++++
6 files changed, 412 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fed8e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5bd4980..0b60a97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -39,6 +39,8 @@ import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
+import javax.management.ObjectName;
+
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
@@ -94,6 +97,7 @@ import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -101,7 +105,7 @@ import org.slf4j.LoggerFactory;
* Keeps information related to the blocks stored in the Hadoop cluster.
*/
@InterfaceAudience.Private
-public class BlockManager {
+public class BlockManager implements BlockStatsMXBean {
public static final Logger LOG = LoggerFactory.getLogger(BlockManager.class);
public static final Logger blockLog = NameNode.blockStateChangeLog;
@@ -129,6 +133,7 @@ public class BlockManager {
private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
private final long startupDelayBlockDeletionInMs;
private final BlockReportLeaseManager blockReportLeaseManager;
+ private ObjectName mxBeanName;
/** Used by metrics */
public long getPendingReplicationBlocksCount() {
@@ -468,6 +473,7 @@ public class BlockManager {
pendingReplications.start();
datanodeManager.activate(conf);
this.replicationThread.start();
+ mxBeanName = MBeans.register("NameNode", "BlockStats", this);
}
public void close() {
@@ -3944,6 +3950,8 @@ public class BlockManager {
public void shutdown() {
stopReplicationInitializer();
blocksMap.close();
+ MBeans.unregister(mxBeanName);
+ mxBeanName = null;
}
public void clear() {
@@ -3954,4 +3962,9 @@ public class BlockManager {
public BlockReportLeaseManager getBlockReportLeaseManager() {
return blockReportLeaseManager;
}
+
+ @Override // BlockStatsMXBean
+ public Map<StorageType, StorageTypeStats> getStorageTypeStats() {
+ return datanodeManager.getDatanodeStatistics().getStorageTypeStats();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fed8e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStatsMXBean.java
new file mode 100644
index 0000000..f22c537
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStatsMXBean.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.util.Map;
+
+import org.apache.hadoop.fs.StorageType;
+
+/**
+ * This is an interface used to retrieve statistic information related to
+ * block management.
+ */
+public interface BlockStatsMXBean {
+
+ /**
+ * The statistics of storage types.
+ *
+ * @return get storage statistics per storage type
+ */
+ Map<StorageType, StorageTypeStats> getStorageTypeStats();
+}
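Once BlockManager registers itself (the MBeans.register("NameNode", "BlockStats", this) call above), this map is readable through standard JMX under the object name the new test asserts. A hedged in-process sketch; a remote JMXConnector against the NameNode would read the same attribute:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class ReadBlockStats {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=NameNode,name=BlockStats");
        // Exposed as composite data mirroring Map<StorageType, StorageTypeStats>
        System.out.println(mbs.getAttribute(name, "StorageTypeStats"));
      }
    }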
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fed8e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
index c9bc3e5..33eca2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import java.util.Map;
+
+import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
/** Datanode statistics */
@@ -71,4 +74,7 @@ public interface DatanodeStatistics {
/** @return the expired heartbeats */
public int getExpiredHeartbeats();
+
+ /** @return Storage Tier statistics*/
+ Map<StorageType, StorageTypeStats> getStorageTypeStats();
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fed8e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index 9017fe1..cc9365d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -18,9 +18,15 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
import java.util.List;
+import java.util.Map;
+import java.util.Set;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -189,6 +195,11 @@ class HeartbeatManager implements DatanodeStatistics {
return stats.expiredHeartbeats;
}
+ @Override
+ public Map<StorageType, StorageTypeStats> getStorageTypeStats() {
+ return stats.statsMap.get();
+ }
+
synchronized void register(final DatanodeDescriptor d) {
if (!d.isAlive) {
addDatanode(d);
@@ -393,6 +404,9 @@ class HeartbeatManager implements DatanodeStatistics {
* For decommissioning/decommissioned nodes, only used capacity is counted.
*/
private static class Stats {
+
+ private final StorageTypeStatsMap statsMap = new StorageTypeStatsMap();
+
private long capacityTotal = 0L;
private long capacityUsed = 0L;
private long capacityRemaining = 0L;
@@ -420,6 +434,14 @@ class HeartbeatManager implements DatanodeStatistics {
}
cacheCapacity += node.getCacheCapacity();
cacheUsed += node.getCacheUsed();
+ Set<StorageType> storageTypes = new HashSet<>();
+ for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
+ statsMap.addStorage(storageInfo, node);
+ storageTypes.add(storageInfo.getStorageType());
+ }
+ for (StorageType storageType : storageTypes) {
+ statsMap.addNode(storageType, node);
+ }
}
private void subtract(final DatanodeDescriptor node) {
@@ -436,6 +458,14 @@ class HeartbeatManager implements DatanodeStatistics {
}
cacheCapacity -= node.getCacheCapacity();
cacheUsed -= node.getCacheUsed();
+ Set<StorageType> storageTypes = new HashSet<>();
+ for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
+ statsMap.subtractStorage(storageInfo, node);
+ storageTypes.add(storageInfo.getStorageType());
+ }
+ for (StorageType storageType : storageTypes) {
+ statsMap.subtractNode(storageType, node);
+ }
}
/** Increment expired heartbeat counter. */
@@ -443,5 +473,69 @@ class HeartbeatManager implements DatanodeStatistics {
expiredHeartbeats++;
}
}
-}
+ /** StorageType specific statistics.
+ * For decommissioning/decommissioned nodes, only used capacity is counted.
+ */
+
+ static final class StorageTypeStatsMap {
+
+ private Map<StorageType, StorageTypeStats> storageTypeStatsMap =
+ new IdentityHashMap<>();
+
+ private StorageTypeStatsMap() {}
+
+ private StorageTypeStatsMap(StorageTypeStatsMap other) {
+ storageTypeStatsMap =
+ new IdentityHashMap<>(other.storageTypeStatsMap);
+ for (Map.Entry<StorageType, StorageTypeStats> entry :
+ storageTypeStatsMap.entrySet()) {
+ entry.setValue(new StorageTypeStats(entry.getValue()));
+ }
+ }
+
+ private Map<StorageType, StorageTypeStats> get() {
+ return Collections.unmodifiableMap(storageTypeStatsMap);
+ }
+
+ private void addNode(StorageType storageType,
+ final DatanodeDescriptor node) {
+ StorageTypeStats storageTypeStats =
+ storageTypeStatsMap.get(storageType);
+ if (storageTypeStats == null) {
+ storageTypeStats = new StorageTypeStats();
+ storageTypeStatsMap.put(storageType, storageTypeStats);
+ }
+ storageTypeStats.addNode(node);
+ }
+
+ private void addStorage(final DatanodeStorageInfo info,
+ final DatanodeDescriptor node) {
+ StorageTypeStats storageTypeStats =
+ storageTypeStatsMap.get(info.getStorageType());
+ if (storageTypeStats == null) {
+ storageTypeStats = new StorageTypeStats();
+ storageTypeStatsMap.put(info.getStorageType(), storageTypeStats);
+ }
+ storageTypeStats.addStorage(info, node);
+ }
+
+ private void subtractStorage(final DatanodeStorageInfo info,
+ final DatanodeDescriptor node) {
+ StorageTypeStats storageTypeStats =
+ storageTypeStatsMap.get(info.getStorageType());
+ if (storageTypeStats != null) {
+ storageTypeStats.subtractStorage(info, node);
+ }
+ }
+
+ private void subtractNode(StorageType storageType,
+ final DatanodeDescriptor node) {
+ StorageTypeStats storageTypeStats =
+ storageTypeStatsMap.get(storageType);
+ if (storageTypeStats != null) {
+ storageTypeStats.subtractNode(node);
+ }
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fed8e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
new file mode 100644
index 0000000..45dcc8d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.beans.ConstructorProperties;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Statistics per StorageType.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class StorageTypeStats {
+ private long capacityTotal = 0L;
+ private long capacityUsed = 0L;
+ private long capacityRemaining = 0L;
+ private long blockPoolUsed = 0L;
+ private int nodesInService = 0;
+
+ @ConstructorProperties({"capacityTotal",
+ "capacityUsed", "capacityRemaining", "blockPoolUsed", "nodesInService"})
+ public StorageTypeStats(long capacityTotal, long capacityUsed,
+ long capacityRemaining, long blockPoolUsed, int nodesInService) {
+ this.capacityTotal = capacityTotal;
+ this.capacityUsed = capacityUsed;
+ this.capacityRemaining = capacityRemaining;
+ this.blockPoolUsed = blockPoolUsed;
+ this.nodesInService = nodesInService;
+ }
+
+ public long getCapacityTotal() {
+ return capacityTotal;
+ }
+
+ public long getCapacityUsed() {
+ return capacityUsed;
+ }
+
+ public long getCapacityRemaining() {
+ return capacityRemaining;
+ }
+
+ public long getBlockPoolUsed() {
+ return blockPoolUsed;
+ }
+
+ public int getNodesInService() {
+ return nodesInService;
+ }
+
+ StorageTypeStats() {}
+
+ StorageTypeStats(StorageTypeStats other) {
+ capacityTotal = other.capacityTotal;
+ capacityUsed = other.capacityUsed;
+ capacityRemaining = other.capacityRemaining;
+ blockPoolUsed = other.blockPoolUsed;
+ nodesInService = other.nodesInService;
+ }
+
+ void addStorage(final DatanodeStorageInfo info,
+ final DatanodeDescriptor node) {
+ capacityUsed += info.getDfsUsed();
+ blockPoolUsed += info.getBlockPoolUsed();
+ if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+ capacityTotal += info.getCapacity();
+ capacityRemaining += info.getRemaining();
+ } else {
+ capacityTotal += info.getDfsUsed();
+ }
+ }
+
+ void addNode(final DatanodeDescriptor node) {
+ if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+ nodesInService++;
+ }
+ }
+
+ void subtractStorage(final DatanodeStorageInfo info,
+ final DatanodeDescriptor node) {
+ capacityUsed -= info.getDfsUsed();
+ blockPoolUsed -= info.getBlockPoolUsed();
+ if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+ capacityTotal -= info.getCapacity();
+ capacityRemaining -= info.getRemaining();
+ } else {
+ capacityTotal -= info.getDfsUsed();
+ }
+ }
+
+ void subtractNode(final DatanodeDescriptor node) {
+ if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+ nodesInService--;
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fed8e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
new file mode 100644
index 0000000..43d983d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
+/**
+ * Class for testing {@link BlockStatsMXBean} implementation
+ */
+public class TestBlockStatsMXBean {
+
+ private MiniDFSCluster cluster;
+
+ @Before
+ public void setup() throws IOException {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ cluster = null;
+ StorageType[][] types = new StorageType[6][];
+ for (int i=0; i<3; i++) {
+ types[i] = new StorageType[] {StorageType.RAM_DISK, StorageType.DISK};
+ }
+ for (int i=3; i< 5; i++) {
+ types[i] = new StorageType[] {StorageType.RAM_DISK, StorageType.ARCHIVE};
+ }
+ types[5] = new StorageType[] {StorageType.RAM_DISK, StorageType.ARCHIVE,
+ StorageType.ARCHIVE};
+
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).
+ storageTypes(types).storagesPerDatanode(3).build();
+ cluster.waitActive();
+ }
+
+ @After
+ public void tearDown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testStorageTypeStats() throws Exception {
+ Map<StorageType, StorageTypeStats> storageTypeStatsMap =
+ cluster.getNamesystem().getBlockManager().getStorageTypeStats();
+ assertTrue(storageTypeStatsMap.containsKey(StorageType.RAM_DISK));
+ assertTrue(storageTypeStatsMap.containsKey(StorageType.DISK));
+ assertTrue(storageTypeStatsMap.containsKey(StorageType.ARCHIVE));
+
+ StorageTypeStats storageTypeStats =
+ storageTypeStatsMap.get(StorageType.RAM_DISK);
+ assertEquals(6, storageTypeStats.getNodesInService());
+
+ storageTypeStats = storageTypeStatsMap.get(StorageType.DISK);
+ assertEquals(3, storageTypeStats.getNodesInService());
+
+ storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
+ assertEquals(3, storageTypeStats.getNodesInService());
+ }
+
+ protected static String readOutput(URL url) throws IOException {
+ StringBuilder out = new StringBuilder();
+ InputStream in = url.openConnection().getInputStream();
+ byte[] buffer = new byte[64 * 1024];
+ int len = in.read(buffer);
+ while (len > 0) {
+ out.append(new String(buffer, 0, len));
+ len = in.read(buffer);
+ }
+ return out.toString();
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testStorageTypeStatsJMX() throws Exception {
+ URL baseUrl = new URL (cluster.getHttpUri(0));
+ String result = readOutput(new URL(baseUrl, "/jmx"));
+ System.out.println(result);
+
+ Map<String, Object> stat = (Map<String, Object>) JSON.parse(result);
+ Object[] beans =(Object[]) stat.get("beans");
+ Map<String, Object> blockStats = null;
+ for (Object bean : beans) {
+ Map<String, Object> map = (Map<String, Object>) bean;
+ if (map.get("name").equals("Hadoop:service=NameNode,name=BlockStats")) {
+ blockStats = map;
+ }
+ }
+ assertNotNull(blockStats);
+ Object[] storageTypeStatsList =
+ (Object[])blockStats.get("StorageTypeStats");
+ assertNotNull(storageTypeStatsList);
+ assertEquals (3, storageTypeStatsList.length);
+
+ Set<String> typesPresent = new HashSet<> ();
+ for (Object obj : storageTypeStatsList) {
+ Map<String, Object> entry = (Map<String, Object>)obj;
+ String storageType = (String)entry.get("key");
+ Map<String,Object> storageTypeStats = (Map<String,Object>)entry.get("value");
+ typesPresent.add(storageType);
+ if (storageType.equals("ARCHIVE") || storageType.equals("DISK") ) {
+ assertEquals(3l, storageTypeStats.get("nodesInService"));
+ } else if (storageType.equals("RAM_DISK")) {
+ assertEquals(6l, storageTypeStats.get("nodesInService"));
+ }
+ else {
+ fail();
+ }
+ }
+
+ assertTrue(typesPresent.contains("ARCHIVE"));
+ assertTrue(typesPresent.contains("DISK"));
+ assertTrue(typesPresent.contains("RAM_DISK"));
+ }
+}
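The test scrapes the full /jmx payload and filters in Java; the JMX JSON servlet also accepts a qry parameter that trims the response to a single bean. A hedged sketch (the NameNode HTTP address is illustrative, and the qry behavior is assumed from the stock JMXJsonServlet):

    import java.io.InputStream;
    import java.net.URL;
    import java.util.Scanner;

    public class JmxProbe {
      public static void main(String[] args) throws Exception {
        // ?qry= filters the JSON to the one bean named above.
        URL url = new URL("http://namenode:50070/jmx"
            + "?qry=Hadoop:service=NameNode,name=BlockStats");
        try (InputStream in = url.openStream();
             Scanner s = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
          System.out.println(s.hasNext() ? s.next() : "");
        }
      }
    }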
[27/27] hadoop git commit: Merge branch 'trunk' into HADOOP-12111
Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111
Conflicts:
dev-support/test-patch.sh
hadoop-common-project/hadoop-common/CHANGES.txt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c5ae8a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c5ae8a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c5ae8a8
Branch: refs/heads/HADOOP-12111
Commit: 1c5ae8a84609b45a3c42c02146f749134f1dce3e
Parents: 084becd 147e020
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jun 30 12:00:40 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jun 30 12:00:40 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 32 ++
.../java/org/apache/hadoop/fs/FileSystem.java | 146 +++++---
.../java/org/apache/hadoop/fs/shell/Delete.java | 17 +-
.../org/apache/hadoop/io/nativeio/NativeIO.c | 1 +
.../org/apache/hadoop/net/unix/DomainSocket.c | 8 +-
.../src/org/apache/hadoop/util/NativeCrc32.c | 4 +-
.../apache/hadoop/fs/FCStatisticsBaseTest.java | 56 ++-
.../java/org/apache/hadoop/fs/TestTrash.java | 14 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 38 ++
.../hadoop-hdfs/src/main/bin/hdfs | 1 +
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 +-
.../server/blockmanagement/BlockManager.java | 15 +-
.../BlockPlacementPolicyDefault.java | 3 +-
.../blockmanagement/BlockStatsMXBean.java | 36 ++
.../CacheReplicationMonitor.java | 4 +-
.../blockmanagement/DatanodeDescriptor.java | 33 +-
.../server/blockmanagement/DatanodeManager.java | 26 +-
.../blockmanagement/DatanodeStatistics.java | 6 +
.../blockmanagement/DatanodeStorageInfo.java | 13 +-
.../blockmanagement/HeartbeatManager.java | 96 ++++-
.../blockmanagement/StorageTypeStats.java | 115 ++++++
.../hdfs/server/datanode/BlockScanner.java | 38 +-
.../hdfs/server/datanode/VolumeScanner.java | 8 +-
.../hdfs/server/namenode/FSDirTruncateOp.java | 361 +++++++++++++++++++
.../hdfs/server/namenode/FSDirectory.java | 95 -----
.../hdfs/server/namenode/FSEditLogLoader.java | 6 +-
.../hdfs/server/namenode/FSNamesystem.java | 237 ++----------
.../org/apache/hadoop/hdfs/tools/DFSck.java | 1 -
.../hdfs/tools/DelegationTokenFetcher.java | 19 +-
.../src/main/resources/hdfs-default.xml | 9 +-
.../src/site/markdown/HDFSCommands.md | 10 +-
.../org/apache/hadoop/hdfs/TestDFSShell.java | 26 ++
.../blockmanagement/TestBlockStatsMXBean.java | 146 ++++++++
.../fsdataset/impl/TestFsDatasetImpl.java | 1 +
.../hdfs/server/namenode/TestDeadDatanode.java | 42 +++
.../hdfs/server/namenode/TestFileTruncate.java | 8 +-
.../hdfs/tools/TestDelegationTokenFetcher.java | 15 +
hadoop-mapreduce-project/CHANGES.txt | 12 +
.../hadoop/fs/azure/NativeAzureFileSystem.java | 5 +-
hadoop-tools/hadoop-pipes/src/CMakeLists.txt | 49 ++-
hadoop-yarn-project/CHANGES.txt | 24 ++
.../hadoop/yarn/client/cli/RMAdminCLI.java | 10 +-
.../hadoop/yarn/client/cli/TestRMAdminCLI.java | 4 +
.../impl/pb/SerializedExceptionPBImpl.java | 2 +-
.../apache/hadoop/yarn/client/ServerProxy.java | 21 +-
.../impl/pb/TestSerializedExceptionPBImpl.java | 12 +-
.../containermanager/TestNMProxy.java | 81 +++--
.../scheduler/capacity/LeafQueue.java | 3 +-
48 files changed, 1410 insertions(+), 501 deletions(-)
----------------------------------------------------------------------
[05/27] hadoop git commit: Adding release 2.7.2 to CHANGES.txt.
Posted by aw...@apache.org.
Adding release 2.7.2 to CHANGES.txt.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aad6a7d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aad6a7d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aad6a7d5
Branch: refs/heads/HADOOP-12111
Commit: aad6a7d5dba5858d6e9845f18c4baec16c91911d
Parents: c6793dd
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Sun Jun 28 16:29:12 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Sun Jun 28 16:29:12 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 ++++++++++++
hadoop-mapreduce-project/CHANGES.txt | 12 ++++++++++++
hadoop-yarn-project/CHANGES.txt | 12 ++++++++++++
4 files changed, 48 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aad6a7d5/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 132e4c0..219ef25 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -884,6 +884,18 @@ Release 2.8.0 - UNRELEASED
HADOOP-12076. Incomplete Cache Mechanism in CredentialProvider API.
(Larry McCay via cnauroth)
+Release 2.7.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aad6a7d5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 94477fe..b89e10c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -968,6 +968,18 @@ Release 2.8.0 - UNRELEASED
HDFS-8546. Prune cached replicas from DatanodeDescriptor state on replica
invalidation. (wang)
+Release 2.7.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aad6a7d5/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 4df3b44..785fce8 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -512,6 +512,18 @@ Release 2.8.0 - UNRELEASED
MAPREDUCE-6413. TestLocalJobSubmission is failing with unknown host
(zhihai xu via jlowe)
+Release 2.7.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aad6a7d5/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cb6deb1..714bfdc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -565,6 +565,18 @@ Release 2.8.0 - UNRELEASED
YARN-3859. LeafQueue doesn't print user properly for application add.
(Varun Saxena via devaraj)
+Release 2.7.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
[02/27] hadoop git commit: YARN-3859. LeafQueue doesn't print user
properly for application add. Contributed by Varun Saxena.
Posted by aw...@apache.org.
YARN-3859. LeafQueue doesn't print user properly for application add.
Contributed by Varun Saxena.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b543d1a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b543d1a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b543d1a3
Branch: refs/heads/HADOOP-12111
Commit: b543d1a390a67e5e92fea67d3a2635058c29e9da
Parents: 79ed0f9
Author: Devaraj K <de...@apache.org>
Authored: Sun Jun 28 10:04:50 2015 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Sun Jun 28 10:04:50 2015 +0530
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java | 3 ++-
2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b543d1a3/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e2bf706..cb6deb1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -562,6 +562,9 @@ Release 2.8.0 - UNRELEASED
YARN-2871. TestRMRestart#testRMRestartGetApplicationList sometime fails in trunk.
(zhihai xu via xgong)
+ YARN-3859. LeafQueue doesn't print user properly for application add.
+ (Varun Saxena via devaraj)
+
Release 2.7.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b543d1a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 8e39133..e5b44a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -676,7 +676,8 @@ public class LeafQueue extends AbstractCSQueue {
LOG.info("Application added -" +
" appId: " + application.getApplicationId() +
- " user: " + user + "," + " leaf-queue: " + getQueueName() +
+ " user: " + application.getUser() + "," +
+ " leaf-queue: " + getQueueName() +
" #user-pending-applications: " + user.getPendingApplications() +
" #user-active-applications: " + user.getActiveApplications() +
" #queue-pending-applications: " + getNumPendingApplications() +
[03/27] hadoop git commit: HADOOP-12009 Clarify
FileSystem.listStatus() sorting order & fix
FileSystemContractBaseTest:testListStatus. (J.Andreina via stevel)
Posted by aw...@apache.org.
HADOOP-12009 Clarify FileSystem.listStatus() sorting order & fix FileSystemContractBaseTest:testListStatus. (J.Andreina via stevel)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dfa8161
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dfa8161
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dfa8161
Branch: refs/heads/HADOOP-12111
Commit: 3dfa8161f9412bcb040f3c29c471344f25f24337
Parents: b543d1a
Author: Steve Loughran <st...@apache.org>
Authored: Sun Jun 28 19:13:48 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Sun Jun 28 19:14:00 2015 +0100
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++++
.../main/java/org/apache/hadoop/fs/FileSystem.java | 17 ++++++++++++++++-
.../src/site/markdown/filesystem/filesystem.md | 4 ++++
.../hadoop/fs/FileSystemContractBaseTest.java | 11 ++++++++---
4 files changed, 32 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfa8161/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 18c73c0..132e4c0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -661,6 +661,10 @@ Release 2.8.0 - UNRELEASED
HADOOP-11958. MetricsSystemImpl fails to show backtrace when an error
occurs (Jason Lowe via jeagles)
+
+ HADOOP-12009 Clarify FileSystem.listStatus() sorting order & fix
+ FileSystemContractBaseTest:testListStatus. (J.Andreina via stevel)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfa8161/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 01d4b27..3f9e3bd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1498,7 +1498,9 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
- *
+ * <p>
+ * Does not guarantee to return the List of files/directories status in a
+ * sorted order.
* @param f given path
* @return the statuses of the files/directories in the given patch
* @throws FileNotFoundException when the path does not exist;
@@ -1540,6 +1542,9 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Filter files/directories in the given path using the user-supplied path
* filter.
+ * <p>
+ * Does not guarantee to return the List of files/directories status in a
+ * sorted order.
*
* @param f
* a path name
@@ -1560,6 +1565,9 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Filter files/directories in the given list of paths using default
* path filter.
+ * <p>
+ * Does not guarantee to return the List of files/directories status in a
+ * sorted order.
*
* @param files
* a list of paths
@@ -1576,6 +1584,9 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Filter files/directories in the given list of paths using user-supplied
* path filter.
+ * <p>
+ * Does not guarantee to return the List of files/directories status in a
+ * sorted order.
*
* @param files
* a list of paths
@@ -1736,6 +1747,8 @@ public abstract class FileSystem extends Configured implements Closeable {
* while consuming the entries. Each file system implementation should
* override this method and provide a more efficient implementation, if
* possible.
+ * Does not guarantee to return the iterator that traverses statuses
+ * of the files in a sorted order.
*
* @param p target path
* @return remote iterator
@@ -1763,6 +1776,8 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* List the statuses and block locations of the files in the given path.
+ * Does not guarantee to return the iterator that traverses statuses
+ * of the files in a sorted order.
*
* If the path is a directory,
* if recursive is false, returns files in the directory;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfa8161/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 84e3755..f323374 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -183,6 +183,10 @@ to the same path:
forall fs in listStatus(Path) :
fs == getFileStatus(fs.path)
+**Ordering of results**: there is no guarantee of ordering of the listed entries.
+While HDFS currently returns an alphanumerically sorted list, neither the Posix `readdir()`
+nor Java's `File.listFiles()` API calls define any ordering of returned values. Applications
+which require a uniform sort order on the results must perform the sorting themselves.
### Atomicity and Consistency
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfa8161/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 2ca81e9..495af00 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.util.ArrayList;
import junit.framework.TestCase;
@@ -224,9 +225,13 @@ public abstract class FileSystemContractBaseTest extends TestCase {
paths = fs.listStatus(path("/test/hadoop"));
assertEquals(3, paths.length);
- assertEquals(path("/test/hadoop/a"), paths[0].getPath());
- assertEquals(path("/test/hadoop/b"), paths[1].getPath());
- assertEquals(path("/test/hadoop/c"), paths[2].getPath());
+ ArrayList<String> list = new ArrayList<String>();
+ for (FileStatus fileState : paths) {
+ list.add(fileState.getPath().toString());
+ }
+ assertTrue(list.contains(path("/test/hadoop/a").toString()));
+ assertTrue(list.contains(path("/test/hadoop/b").toString()));
+ assertTrue(list.contains(path("/test/hadoop/c").toString()));
paths = fs.listStatus(path("/test/hadoop/a"));
assertEquals(0, paths.length);
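Since listStatus() now explicitly disclaims any ordering, callers that relied on HDFS's incidental alphanumeric order need to sort on their side. A minimal client-side sketch, assuming plain path-string ordering is what the caller wants:

import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SortedListing {
  /** listStatus() with an explicit, stable ordering imposed by the caller. */
  public static FileStatus[] listSorted(FileSystem fs, Path dir)
      throws IOException {
    FileStatus[] statuses = fs.listStatus(dir);
    Arrays.sort(statuses, new Comparator<FileStatus>() {
      @Override
      public int compare(FileStatus a, FileStatus b) {
        // Compare full path strings; the API itself promises no order.
        return a.getPath().toString().compareTo(b.getPath().toString());
      }
    });
    return statuses;
  }
}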
[26/27] hadoop git commit: HADOOP-12149. copy all of test-patch
BINDIR prior to re-exec (aw)
Posted by aw...@apache.org.
HADOOP-12149. copy all of test-patch BINDIR prior to re-exec (aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/147e020c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/147e020c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/147e020c
Branch: refs/heads/HADOOP-12111
Commit: 147e020c7aef3ba42eddcef3be1b4ae7c7910371
Parents: 076948d
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jun 30 11:31:26 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jun 30 11:31:26 2015 -0700
----------------------------------------------------------------------
dev-support/test-patch.sh | 4 ++--
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/147e020c/dev-support/test-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index cd91a5c..efcd614 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -1521,8 +1521,8 @@ function check_reexec
cd "${CWD}"
mkdir -p "${PATCH_DIR}/dev-support-test"
- cp -pr "${BASEDIR}"/dev-support/test-patch* "${PATCH_DIR}/dev-support-test"
- cp -pr "${BASEDIR}"/dev-support/smart-apply* "${PATCH_DIR}/dev-support-test"
+ (cd "${BINDIR}"; tar cpf - . ) \
+ | (cd "${PATCH_DIR}/dev-support-test"; tar xpf - )
big_console_header "exec'ing test-patch.sh now..."
http://git-wip-us.apache.org/repos/asf/hadoop/blob/147e020c/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c010ff1..e332ea8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -232,6 +232,8 @@ Trunk (Unreleased)
HADOOP-11807. add a lint mode to releasedocmaker (ramtin via aw)
+ HADOOP-12149. copy all of test-patch BINDIR prior to re-exec (aw)
+
BUG FIXES
HADOOP-11473. test-patch says "-1 overall" even when all checks are +1
[14/27] hadoop git commit: YARN-3695. ServerProxy (NMProxy,
etc.) shouldn't retry forever for non network exception. Contributed
by Raju Bairishetti
Posted by aw...@apache.org.
YARN-3695. ServerProxy (NMProxy, etc.) shouldn't retry forever for non network exception. Contributed by Raju Bairishetti
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62e583c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62e583c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62e583c7
Branch: refs/heads/HADOOP-12111
Commit: 62e583c7dcbb30d95d8b32a4978fbdb3b98d67cc
Parents: fad291e
Author: Jian He <ji...@apache.org>
Authored: Mon Jun 29 13:37:32 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Jun 29 13:37:32 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../apache/hadoop/yarn/client/ServerProxy.java | 21 ++---
.../containermanager/TestNMProxy.java | 81 +++++++++++++-------
3 files changed, 70 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e583c7/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3ebf799..2fdcc9d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -568,6 +568,9 @@ Release 2.8.0 - UNRELEASED
YARN-3860. rmadmin -transitionToActive should check the state of non-target node.
(Masatake Iwasaki via junping_du)
+ YARN-3695. ServerProxy (NMProxy, etc.) shouldn't retry forever for non
+ network exception. (Raju Bairishetti via jianhe)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e583c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
index e9bcf8d..de7fc7d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
@@ -53,19 +53,22 @@ public class ServerProxy {
long maxWaitTime = conf.getLong(maxWaitTimeStr, defMaxWaitTime);
long retryIntervalMS =
conf.getLong(connectRetryIntervalStr, defRetryInterval);
- if (maxWaitTime == -1) {
- // wait forever.
- return RetryPolicies.RETRY_FOREVER;
- }
- Preconditions.checkArgument(maxWaitTime > 0, "Invalid Configuration. "
- + maxWaitTimeStr + " should be a positive value.");
+ Preconditions.checkArgument((maxWaitTime == -1 || maxWaitTime > 0),
+ "Invalid Configuration. " + maxWaitTimeStr + " should be either"
+ + " positive value or -1.");
Preconditions.checkArgument(retryIntervalMS > 0, "Invalid Configuration. "
+ connectRetryIntervalStr + "should be a positive value.");
- RetryPolicy retryPolicy =
- RetryPolicies.retryUpToMaximumTimeWithFixedSleep(maxWaitTime,
- retryIntervalMS, TimeUnit.MILLISECONDS);
+ RetryPolicy retryPolicy = null;
+ if (maxWaitTime == -1) {
+ // wait forever.
+ retryPolicy = RetryPolicies.RETRY_FOREVER;
+ } else {
+ retryPolicy =
+ RetryPolicies.retryUpToMaximumTimeWithFixedSleep(maxWaitTime,
+ retryIntervalMS, TimeUnit.MILLISECONDS);
+ }
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e583c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 0b372be..102c9c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.io.retry.UnreliableInterface;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -58,8 +59,8 @@ public class TestNMProxy extends BaseContainerManagerTest {
@Before
public void setUp() throws Exception {
- conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS, 10000);
- conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_RETRY_INTERVAL_MS, 100);
+ containerManager.start();
+ containerManager.setBlockNewContainerRequests(false);
}
@Override
@@ -77,7 +78,13 @@ public class TestNMProxy extends BaseContainerManagerTest {
// This causes super to throw an NMNotYetReadyException
containerManager.setBlockNewContainerRequests(true);
} else {
- throw new java.net.ConnectException("start container exception");
+ if (isRetryPolicyRetryForEver()) {
+ // Throw non network exception
+ throw new IOException(
+ new UnreliableInterface.UnreliableException());
+ } else {
+ throw new java.net.ConnectException("start container exception");
+ }
}
} else {
// This stops super from throwing an NMNotYetReadyException
@@ -86,6 +93,11 @@ public class TestNMProxy extends BaseContainerManagerTest {
return super.startContainers(requests);
}
+ private boolean isRetryPolicyRetryForEver() {
+ return conf.getLong(
+ YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS, 1000) == -1;
+ }
+
@Override
public StopContainersResponse stopContainers(
StopContainersRequest requests) throws YarnException, IOException {
@@ -110,30 +122,13 @@ public class TestNMProxy extends BaseContainerManagerTest {
}
@Test(timeout = 20000)
- public void testNMProxyRetry() throws Exception {
- containerManager.start();
- containerManager.setBlockNewContainerRequests(false);
- StartContainersRequest allRequests =
- Records.newRecord(StartContainersRequest.class);
- ApplicationId appId = ApplicationId.newInstance(1, 1);
- ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+ public void testNMProxyRetry() throws Exception {
+ conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS, 10000);
+ conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_RETRY_INTERVAL_MS, 100);
+ StartContainersRequest allRequests =
+ Records.newRecord(StartContainersRequest.class);
- org.apache.hadoop.yarn.api.records.Token nmToken =
- context.getNMTokenSecretManager().createNMToken(attemptId,
- context.getNodeId(), user);
- final InetSocketAddress address =
- conf.getSocketAddr(YarnConfiguration.NM_BIND_HOST,
- YarnConfiguration.NM_ADDRESS, YarnConfiguration.DEFAULT_NM_ADDRESS,
- YarnConfiguration.DEFAULT_NM_PORT);
- Token<NMTokenIdentifier> token =
- ConverterUtils.convertFromYarn(nmToken,
- SecurityUtil.buildTokenService(address));
- UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
- ugi.addToken(token);
-
- ContainerManagementProtocol proxy =
- NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, ugi,
- YarnRPC.create(conf), address);
+ ContainerManagementProtocol proxy = getNMProxy();
retryCount = 0;
shouldThrowNMNotYetReadyException = false;
@@ -156,4 +151,38 @@ public class TestNMProxy extends BaseContainerManagerTest {
proxy.startContainers(allRequests);
Assert.assertEquals(5, retryCount);
}
+
+ @Test(timeout = 20000, expected = IOException.class)
+ public void testShouldNotRetryForeverForNonNetworkExceptionsOnNMConnections()
+ throws Exception {
+ conf.setLong(YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS, -1);
+ StartContainersRequest allRequests =
+ Records.newRecord(StartContainersRequest.class);
+
+ ContainerManagementProtocol proxy = getNMProxy();
+
+ shouldThrowNMNotYetReadyException = false;
+ retryCount = 0;
+ proxy.startContainers(allRequests);
+ }
+
+ private ContainerManagementProtocol getNMProxy() {
+ ApplicationId appId = ApplicationId.newInstance(1, 1);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+
+ org.apache.hadoop.yarn.api.records.Token nmToken =
+ context.getNMTokenSecretManager().createNMToken(attemptId,
+ context.getNodeId(), user);
+ final InetSocketAddress address =
+ conf.getSocketAddr(YarnConfiguration.NM_BIND_HOST,
+ YarnConfiguration.NM_ADDRESS, YarnConfiguration.DEFAULT_NM_ADDRESS,
+ YarnConfiguration.DEFAULT_NM_PORT);
+ Token<NMTokenIdentifier> token =
+ ConverterUtils.convertFromYarn(nmToken,
+ SecurityUtil.buildTokenService(address));
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
+ ugi.addToken(token);
+ return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, ugi,
+ YarnRPC.create(conf), address);
+ }
}
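The shape of the policy this patch builds in ServerProxy can be summarized as follows. This is a simplified sketch with caller-supplied wait/interval values, not the exact production wiring (the real exception map covers more network exception types):

import java.net.ConnectException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RetryPolicySketch {
  public static RetryPolicy createPolicy(long maxWaitTime, long retryIntervalMS) {
    // -1 keeps the legacy wait-forever behaviour; any positive value bounds
    // the total wait, matching the Preconditions check in the patch.
    RetryPolicy base = (maxWaitTime == -1)
        ? RetryPolicies.RETRY_FOREVER
        : RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
            maxWaitTime, retryIntervalMS, TimeUnit.MILLISECONDS);
    // Only the mapped (network) exceptions are retried with the base policy;
    // anything else falls through to fail-fast, which is the point of the fix.
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(ConnectException.class, base);
    return RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
  }
}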
[22/27] hadoop git commit: HDFS-8579. Update HDFS usage with missing
options (Contributed by J.Andreina)
Posted by aw...@apache.org.
HDFS-8579. Update HDFS usage with missing options (Contributed by J.Andreina)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77588e1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77588e1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77588e1d
Branch: refs/heads/HADOOP-12111
Commit: 77588e1d32cc465e6b0699bb3564a4c7c6df16fa
Parents: d3797f9
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Jun 30 15:00:22 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Jun 30 15:00:22 2015 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 1 +
2 files changed, 4 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/77588e1d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3535f90..ea67ea1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -983,6 +983,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8687. Remove the duplicate usage message from Dfsck.java. (Brahma
Reddy Battula via Arpit Agarwal)
+ HDFS-8579. Update HDFS usage with missing options
+ (J.Andreina via vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/77588e1d/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index f464261..2e09cef 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -26,6 +26,7 @@ function hadoop_usage
echo " Hadoop jar and the required libraries"
echo " crypto configure HDFS encryption zones"
echo " datanode run a DFS datanode"
+ echo " debug run a Debug Admin to execute HDFS debug commands"
echo " dfs run a filesystem command on the file system"
echo " dfsadmin run a DFS admin client"
echo " fetchdt fetch a delegation token from the NameNode"
[20/27] hadoop git commit: HDFS-8687. Remove the duplicate usage
message from Dfsck.java. (Contributed by Brahma Reddy Battula)
Posted by aw...@apache.org.
HDFS-8687. Remove the duplicate usage message from Dfsck.java. (Contributed by Brahma Reddy Battula)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e333720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e333720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e333720
Branch: refs/heads/HADOOP-12111
Commit: 8e333720e13428a4d0d0f65692102f8f2e5da98d
Parents: 43a1288
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Jun 29 14:49:00 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Jun 29 14:56:42 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java | 1 -
2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e333720/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eb006eb..108a6c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -977,6 +977,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8628. Update missing command option for fetchdt
(J.Andreina via vinayakumarb)
+ HDFS-8687. Remove the duplicate usage message from Dfsck.java. (Brahma
+ Reddy Battula via Arpit Agarwal)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e333720/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 33033e7..6bb6603 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -377,7 +377,6 @@ public class DFSck extends Configured implements Tool {
int res = -1;
if ((args.length == 0) || ("-files".equals(args[0]))) {
printUsage(System.err);
- ToolRunner.printGenericCommandUsage(System.err);
} else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
res = 0;
} else {
[06/27] hadoop git commit: YARN-3860. rmadmin -transitionToActive
should check the state of non-target node. (Contributed by Masatake Iwasaki)
Posted by aw...@apache.org.
YARN-3860. rmadmin -transitionToActive should check the state of non-target node. (Contributed by Masatake Iwasaki)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a95d39f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a95d39f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a95d39f9
Branch: refs/heads/HADOOP-12111
Commit: a95d39f9d08b3b215a1b33e77e9ab8a2ee59b3a9
Parents: aad6a7d
Author: Junping Du <ju...@apache.org>
Authored: Sun Jun 28 22:26:47 2015 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Sun Jun 28 22:26:47 2015 -0700
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../org/apache/hadoop/yarn/client/cli/RMAdminCLI.java | 10 +++++++++-
.../org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java | 4 ++++
3 files changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a95d39f9/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 714bfdc..3ebf799 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -565,6 +565,9 @@ Release 2.8.0 - UNRELEASED
YARN-3859. LeafQueue doesn't print user properly for application add.
(Varun Saxena via devaraj)
+ YARN-3860. rmadmin -transitionToActive should check the state of non-target node.
+ (Masatake Iwasaki via junping_du)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a95d39f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 932b62d..36d29d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -797,7 +797,15 @@ public class RMAdminCLI extends HAAdmin {
"Could not connect to RM HA Admin for node " + rmId);
}
}
-
+
+ /**
+ * returns the list of all resourcemanager ids for the given configuration.
+ */
+ @Override
+ protected Collection<String> getTargetIds(String targetNodeToActivate) {
+ return HAUtil.getRMHAIds(getConf());
+ }
+
@Override
protected String getUsageString() {
return "Usage: rmadmin";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a95d39f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index a95412c..6dc326a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -27,6 +27,7 @@ import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -116,6 +117,7 @@ public class TestRMAdminCLI {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
+ conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
rmAdminCLIWithHAEnabled = new RMAdminCLI(conf) {
@Override
@@ -259,6 +261,8 @@ public class TestRMAdminCLI {
assertEquals(0, rmAdminCLIWithHAEnabled.run(args));
verify(haadmin).transitionToActive(
any(HAServiceProtocol.StateChangeRequestInfo.class));
+ // HAAdmin#isOtherTargetNodeActive should check state of non-target node.
+ verify(haadmin, times(1)).getServiceStatus();
}
@Test(timeout = 500)
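The effect of overriding getTargetIds() is that HAAdmin can consult every RM in the ensemble before activating one. A rough sketch of that guard follows; getProtocol() is a hypothetical resolver standing in for the real proxy lookup inside HAAdmin:

import java.io.IOException;

import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;

public class TransitionGuardSketch {
  /** Returns true iff no RM other than targetId is already active. */
  static boolean safeToActivate(Iterable<String> rmIds, String targetId)
      throws IOException {
    for (String id : rmIds) {
      if (id.equals(targetId)) {
        continue;
      }
      // Hypothetical per-node proxy lookup; see HAAdmin for the real one.
      HAServiceProtocol proxy = getProtocol(id);
      if (proxy.getServiceStatus().getState() == HAServiceState.ACTIVE) {
        return false; // another node is already active; refuse the transition
      }
    }
    return true;
  }

  static HAServiceProtocol getProtocol(String rmId) {
    throw new UnsupportedOperationException("wire up per deployment");
  }
}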
[13/27] hadoop git commit: HADOOP-12112. Make hadoop-common-project
Native code -Wall-clean (alanburlison via cmccabe)
Posted by aw...@apache.org.
HADOOP-12112. Make hadoop-common-project Native code -Wall-clean (alanburlison via cmccabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fad291ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fad291ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fad291ea
Branch: refs/heads/HADOOP-12111
Commit: fad291ea6dbe49782e33a32cd6608088951e2c58
Parents: 5a27c3f
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Jun 29 12:34:47 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Jun 29 12:35:16 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c | 1 +
.../native/src/org/apache/hadoop/net/unix/DomainSocket.c | 8 ++++----
.../src/main/native/src/org/apache/hadoop/util/NativeCrc32.c | 4 ++--
4 files changed, 10 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fad291ea/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e8e85a0..9008ead 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -691,6 +691,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12036. Consolidate all of the cmake extensions in one directory
(alanburlison via cmccabe)
+ HADOOP-12112. Make hadoop-common-project Native code -Wall-clean
+ (alanburlison via cmccabe)
+
BUG FIXES
HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fad291ea/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
index 071d830..bc78ab2 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
@@ -592,6 +592,7 @@ done:
#else
THROW(env, "java/io/IOException",
"The function Windows.createFileWithMode0() is not supported on this platform");
+ return NULL;
#endif
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fad291ea/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
index 55ab0c3..a3f27ee 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
@@ -112,9 +112,9 @@ static jthrowable newSocketException(JNIEnv *env, int errnum,
* to the heap if necessary.
*/
struct flexibleBuffer {
- int8_t *curBuf;
- int8_t *allocBuf;
- int8_t stackBuf[8196];
+ jbyte *curBuf;
+ jbyte *allocBuf;
+ jbyte stackBuf[8196];
};
static jthrowable flexBufInit(JNIEnv *env, struct flexibleBuffer *flexBuf, jint length)
@@ -637,7 +637,7 @@ JNIEnv *env, jclass clazz, jint fd)
* @return NULL on success; or the unraised exception representing
* the problem.
*/
-static jthrowable write_fully(JNIEnv *env, int fd, int8_t *buf, int amt)
+static jthrowable write_fully(JNIEnv *env, int fd, jbyte *buf, int amt)
{
int err, res;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fad291ea/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
index 74e09e6..3fceb02 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
@@ -168,7 +168,7 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeComputeChun
// Setup complete. Actually verify checksums.
ret = bulk_crc(data, data_len, sums, crc_type,
bytes_per_checksum, verify ? &error_data : NULL);
- if (likely(verify && ret == CHECKSUMS_VALID || !verify && ret == 0)) {
+ if (likely((verify && ret == CHECKSUMS_VALID) || (!verify && ret == 0))) {
return;
} else if (unlikely(verify && ret == INVALID_CHECKSUM_DETECTED)) {
long pos = base_pos + (error_data.bad_data - data);
@@ -261,7 +261,7 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeComputeChun
env, error_data.got_crc, error_data.expected_crc,
j_filename, pos);
return;
- } else if (unlikely(verify && ret != CHECKSUMS_VALID || !verify && ret != 0)) {
+ } else if (unlikely((verify && ret != CHECKSUMS_VALID) || (!verify && ret != 0))) {
THROW(env, "java/lang/AssertionError",
"Bad response code from native bulk_crc");
return;
[09/27] hadoop git commit: HDFS-8628. Update missing command option
for fetchdt (Contributed by J.Andreina)
Posted by aw...@apache.org.
HDFS-8628. Update missing command option for fetchdt (Contributed by J.Andreina)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fde20ffc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fde20ffc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fde20ffc
Branch: refs/heads/HADOOP-12111
Commit: fde20ffcef6dcd477f501b5f80c86665296711c5
Parents: c815344
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon Jun 29 16:50:48 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon Jun 29 16:50:48 2015 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop-hdfs/src/site/markdown/HDFSCommands.md | 10 +++++++---
2 files changed, 10 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fde20ffc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3800184..e55f340 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -971,6 +971,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8586. Dead Datanode is allocated for write when client is from deadnode
(Brahma Reddy Battula via vinayakumarb)
+ HDFS-8628. Update missing command option for fetchdt
+ (J.Andreina via vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fde20ffc/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 6841201..094dda1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -89,12 +89,16 @@ Run a filesystem command on the file system supported in Hadoop. The various COM
### `fetchdt`
-Usage: `hdfs fetchdt [--webservice <namenode_http_addr>] <path> `
+Usage: `hdfs fetchdt <opts> <token_file_path> `
| COMMAND\_OPTION | Description |
|:---- |:---- |
-| `--webservice` *https\_address* | use http protocol instead of RPC |
-| *fileName* | File name to store the token into. |
+| `--webservice` *NN_Url* | Url to contact NN on (starts with http or https)|
+| `--renewer` *name* | Name of the delegation token renewer |
+| `--cancel` | Cancel the delegation token |
+| `--renew` | Renew the delegation token. Delegation token must have been fetched using the --renewer *name* option.|
+| `--print` | Print the delegation token |
+| *token_file_path* | File path to store the token into. |
Gets Delegation Token from a NameNode. See [fetchdt](./HdfsUserGuide.html#fetchdt) for more info.
[24/27] hadoop git commit: HDFS-8627. NPE thrown if unable to fetch
token from Namenode (Contributed by J.Andreina)
Posted by aw...@apache.org.
HDFS-8627. NPE thrown if unable to fetch token from Namenode (Contributed by J.Andreina)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ba5bbac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ba5bbac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ba5bbac
Branch: refs/heads/HADOOP-12111
Commit: 7ba5bbac02b688f68a8d23671a1e869234b4cebe
Parents: 6d99017
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Jun 30 15:42:59 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Jun 30 15:42:59 2015 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hdfs/tools/DelegationTokenFetcher.java | 19 +++++++++++--------
.../hdfs/tools/TestDelegationTokenFetcher.java | 15 +++++++++++++++
3 files changed, 29 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba5bbac/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ea67ea1..b065f98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -338,6 +338,9 @@ Trunk (Unreleased)
HDFS-8412. Fix the test failures in HTTPFS. (umamahesh)
+ HDFS-8627. NPE thrown if unable to fetch token from Namenode
+ (J.Andreina via vinayakumarb)
+
Release 2.8.0 - UNRELEASED
NEW FEATURES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba5bbac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 6376243..803402d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -176,14 +176,17 @@ public class DelegationTokenFetcher {
final String renewer, final Path tokenFile)
throws IOException {
Token<?> token = fs.getDelegationToken(renewer);
-
- Credentials cred = new Credentials();
- cred.addToken(token.getKind(), token);
- cred.writeTokenStorageFile(tokenFile, conf);
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Fetched token " + fs.getUri() + " for " + token.getService()
- + " into " + tokenFile);
+ if (null != token) {
+ Credentials cred = new Credentials();
+ cred.addToken(token.getKind(), token);
+ cred.writeTokenStorageFile(tokenFile, conf);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Fetched token " + fs.getUri() + " for " +
+ token.getService() + " into " + tokenFile);
+ }
+ } else {
+ System.err.println("ERROR: Failed to fetch token from " + fs.getUri());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ba5bbac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
index b4cf1c0..ab3933b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
@@ -90,4 +90,19 @@ public class TestDelegationTokenFetcher {
DelegationTokenFetcher.cancelTokens(conf, p);
Assert.assertEquals(testToken, FakeRenewer.getLastCanceled());
}
+
+ /**
+ * If token returned is null, saveDelegationToken should not
+ * throw nullPointerException
+ */
+ @Test
+ public void testReturnedTokenIsNull() throws Exception {
+ WebHdfsFileSystem fs = mock(WebHdfsFileSystem.class);
+ doReturn(null).when(fs).getDelegationToken(anyString());
+ Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
+ DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
+ // When Token returned is null, TokenFile should not exist
+ Assert.assertFalse(p.getFileSystem(conf).exists(p));
+
+ }
}
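The same null-guard applies to any caller of FileSystem#getDelegationToken(), since filesystems without token support may return null. A minimal sketch of the pattern this patch adopts (class and method names here are illustrative, not from the patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class TokenSaveSketch {
  public static void saveToken(Configuration conf, FileSystem fs,
      String renewer, Path tokenFile) throws IOException {
    Token<?> token = fs.getDelegationToken(renewer);
    if (token == null) {
      // Bail out instead of dereferencing null (the NPE this patch removes).
      System.err.println("ERROR: Failed to fetch token from " + fs.getUri());
      return;
    }
    Credentials cred = new Credentials();
    cred.addToken(token.getKind(), token);
    cred.writeTokenStorageFile(tokenFile, conf);
  }
}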
[15/27] hadoop git commit: HADOOP-12089. StorageException complaining
" no lease ID" when updating FolderLastModifiedTime in WASB. Contributed by
Duo Xu.
Posted by aw...@apache.org.
HADOOP-12089. StorageException complaining " no lease ID" when updating FolderLastModifiedTime in WASB. Contributed by Duo Xu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/460e98f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/460e98f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/460e98f7
Branch: refs/heads/HADOOP-12111
Commit: 460e98f7b3ec84f3c5afcb2aad4f4e7031d16e3a
Parents: 62e583c
Author: cnauroth <cn...@apache.org>
Authored: Mon Jun 29 13:48:02 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Mon Jun 29 13:48:02 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java | 5 ++---
2 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/460e98f7/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9008ead..a9b44e3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -893,6 +893,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12119. hadoop fs -expunge does not work for federated namespace
(J.Andreina via vinayakumarb)
+ HADOOP-12089. StorageException complaining " no lease ID" when updating
+ FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/460e98f7/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 623645a..a567b33 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -1500,7 +1500,7 @@ public class NativeAzureFileSystem extends FileSystem {
createPermissionStatus(FsPermission.getDefault()));
} else {
if (!skipParentFolderLastModifidedTimeUpdate) {
- store.updateFolderLastModifiedTime(parentKey, null);
+ updateParentFolderLastModifiedTime(key);
}
}
}
@@ -1561,9 +1561,8 @@ public class NativeAzureFileSystem extends FileSystem {
// Update parent directory last modified time
Path parent = absolutePath.getParent();
if (parent != null && parent.getParent() != null) { // not root
- String parentKey = pathToKey(parent);
if (!skipParentFolderLastModifidedTimeUpdate) {
- store.updateFolderLastModifiedTime(parentKey, null);
+ updateParentFolderLastModifiedTime(key);
}
}
instrumentation.directoryDeleted();
[12/27] hadoop git commit: HADOOP-12104. Migrate Hadoop Pipes native
build to new CMake framework (Alan Burlison via Colin P. McCabe)
Posted by aw...@apache.org.
HADOOP-12104. Migrate Hadoop Pipes native build to new CMake framework (Alan Burlison via Colin P. McCabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a27c3fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a27c3fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a27c3fd
Branch: refs/heads/HADOOP-12111
Commit: 5a27c3fd7616215937264c2b1f015205e60f2d73
Parents: 2ffd842
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Jun 29 12:01:17 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Jun 29 12:31:26 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 ++
hadoop-tools/hadoop-pipes/src/CMakeLists.txt | 49 +++++++++-----------
2 files changed, 24 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a27c3fd/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0a964a3..e8e85a0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -685,6 +685,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11885. hadoop-dist dist-layout-stitching.sh does not work with dash.
(wang)
+ HADOOP-12104. Migrate Hadoop Pipes native build to new CMake framework
+ (alanburlison via cmccabe)
+
HADOOP-12036. Consolidate all of the cmake extensions in one directory
(alanburlison via cmccabe)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a27c3fd/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-pipes/src/CMakeLists.txt b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
index 170af49..3b0b28c 100644
--- a/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
+++ b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
@@ -6,7 +6,7 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-
+#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
@@ -17,25 +17,11 @@
#
cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
-find_package(OpenSSL REQUIRED)
-
-set(CMAKE_BUILD_TYPE, Release)
-set(PIPES_FLAGS "-g -Wall -O2 -D_REENTRANT -D_GNU_SOURCE")
-set(PIPES_FLAGS "${PIPES_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${PIPES_FLAGS}")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${PIPES_FLAGS}")
+list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../hadoop-common-project/hadoop-common)
+include(HadoopCommon)
-include(../../../hadoop-common-project/hadoop-common/src/JNIFlags.cmake NO_POLICY_SCOPE)
-
-function(output_directory TGT DIR)
- SET_TARGET_PROPERTIES(${TGT} PROPERTIES
- RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
- SET_TARGET_PROPERTIES(${TGT} PROPERTIES
- ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
- SET_TARGET_PROPERTIES(${TGT} PROPERTIES
- LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
-endfunction(output_directory TGT DIR)
+find_package(OpenSSL REQUIRED)
include_directories(
main/native/utils/api
@@ -47,19 +33,19 @@ include_directories(
# Example programs
add_executable(wordcount-simple main/native/examples/impl/wordcount-simple.cc)
target_link_libraries(wordcount-simple hadooppipes hadooputils)
-output_directory(wordcount-simple examples)
+hadoop_output_directory(wordcount-simple examples)
add_executable(wordcount-part main/native/examples/impl/wordcount-part.cc)
target_link_libraries(wordcount-part hadooppipes hadooputils)
-output_directory(wordcount-part examples)
+hadoop_output_directory(wordcount-part examples)
add_executable(wordcount-nopipe main/native/examples/impl/wordcount-nopipe.cc)
target_link_libraries(wordcount-nopipe hadooppipes hadooputils)
-output_directory(wordcount-nopipe examples)
+hadoop_output_directory(wordcount-nopipe examples)
add_executable(pipes-sort main/native/examples/impl/sort.cc)
target_link_libraries(pipes-sort hadooppipes hadooputils)
-output_directory(pipes-sort examples)
+hadoop_output_directory(pipes-sort examples)
add_library(hadooputils STATIC
main/native/utils/impl/StringUtils.cc
@@ -70,15 +56,22 @@ add_library(hadooppipes STATIC
main/native/pipes/impl/HadoopPipes.cc
)
-INCLUDE(CheckLibraryExists)
-CHECK_LIBRARY_EXISTS(dl dlopen "" NEED_LINK_DL)
+include(CheckLibraryExists)
+check_library_exists(dl dlopen "" NEED_LINK_DL)
+
+if(NEED_LINK_DL)
+ set(LIB_DL "dl")
+endif()
-if (NEED_LINK_DL)
- set(LIB_DL dl)
-endif (NEED_LINK_DL)
+if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+ exec_program("uname" ARGS "-r" OUTPUT_VARIABLE OS_VERSION)
+ if(OS_VERSION VERSION_LESS "5.12")
+ set(LIB_NET "socket" "nsl")
+ endif()
+endif()
target_link_libraries(hadooppipes
${OPENSSL_LIBRARIES}
${LIB_DL}
- pthread
+ ${LIB_NET}
)
[04/27] hadoop git commit: HDFS-8681. BlockScanner is incorrectly
disabled by default. (Contributed by Arpit Agarwal)
HDFS-8681. BlockScanner is incorrectly disabled by default. (Contributed by Arpit Agarwal)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6793dd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6793dd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6793dd8
Branch: refs/heads/HADOOP-12111
Commit: c6793dd8cc69ea994eb23c3e1349efe4b9feca9a
Parents: 3dfa816
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sun Jun 28 14:51:17 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sun Jun 28 14:51:36 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 +-
.../hdfs/server/datanode/BlockScanner.java | 36 ++++++++++++++++----
.../src/main/resources/hdfs-default.xml | 9 +++--
.../fsdataset/impl/TestFsDatasetImpl.java | 1 +
5 files changed, 41 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e287ea4..94477fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1113,6 +1113,9 @@ Release 2.7.1 - UNRELEASED
HDFS-8656. Preserve compatibility of ClientProtocol#rollingUpgrade after
finalization. (wang)
+ HDFS-8681. BlockScanner is incorrectly disabled by default.
+ (Arpit Agarwal)
+
Release 2.7.0 - 2015-04-20
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ebd668f..0e569f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -378,7 +378,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
- public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+ public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 24; // 3 weeks.
public static final String DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = "dfs.block.scanner.volume.bytes.per.second";
public static final long DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index b0248c5..9c4dd10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -115,6 +115,34 @@ public class BlockScanner {
}
}
+ /**
+ * Determine the configured block scanner interval.
+ *
+ * For compatibility with prior releases of HDFS, if the
+ * configured value is zero then the scan period is
+ * set to 3 weeks.
+ *
+ * If the configured value is less than zero then the scanner
+ * is disabled.
+ *
+ * @param conf Configuration object.
+ * @return block scan period in milliseconds.
+ */
+ private static long getConfiguredScanPeriodMs(Configuration conf) {
+ long tempScanPeriodMs = getUnitTestLong(
+ conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
+ TimeUnit.MILLISECONDS.convert(conf.getLong(
+ DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
+ DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS));
+
+ if (tempScanPeriodMs == 0) {
+ tempScanPeriodMs = TimeUnit.MILLISECONDS.convert(
+ DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT, TimeUnit.HOURS);
+ }
+
+ return tempScanPeriodMs;
+ }
+
@SuppressWarnings("unchecked")
Conf(Configuration conf) {
this.targetBytesPerSec = Math.max(0L, conf.getLong(
@@ -123,11 +151,7 @@ public class BlockScanner {
this.maxStalenessMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT));
- this.scanPeriodMs = Math.max(0L,
- getUnitTestLong(conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
- TimeUnit.MILLISECONDS.convert(conf.getLong(
- DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
- DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS)));
+ this.scanPeriodMs = getConfiguredScanPeriodMs(conf);
this.cursorSaveMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT));
@@ -159,7 +183,7 @@ public class BlockScanner {
* no threads will start.
*/
public boolean isEnabled() {
- return (conf.scanPeriodMs) > 0 && (conf.targetBytesPerSec > 0);
+ return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
}
/**
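
The new getConfiguredScanPeriodMs javadoc above spells out the compatibility rule: a configured value of zero now falls back to the 3-week default, and only a negative value disables the scanner. A minimal standalone sketch of that resolution logic, leaving out Hadoop's Configuration plumbing and the unit-test override key (class and method names here are illustrative):

import java.util.concurrent.TimeUnit;

public class ScanPeriodSemantics {
  // 21 * 24 hours = 3 weeks, matching DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT.
  static final long DEFAULT_HOURS = 21 * 24;

  static long scanPeriodMs(long configuredHours) {
    long ms = TimeUnit.MILLISECONDS.convert(configuredHours, TimeUnit.HOURS);
    if (ms == 0) {
      // Compatibility: a configured zero falls back to the default period.
      ms = TimeUnit.MILLISECONDS.convert(DEFAULT_HOURS, TimeUnit.HOURS);
    }
    return ms; // isEnabled() requires scanPeriodMs > 0, so negatives disable.
  }

  public static void main(String[] args) {
    System.out.println(scanPeriodMs(0));    // 1814400000 ms = 3 weeks
    System.out.println(scanPeriodMs(-1));   // -3600000 -> scanner stays disabled
    System.out.println(scanPeriodMs(504));  // 1814400000 ms, the shipped default
  }
}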
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 76161a5..8cb7d5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1071,11 +1071,14 @@
<property>
<name>dfs.datanode.scan.period.hours</name>
- <value>0</value>
+ <value>504</value>
<description>
- If this is 0 or negative, the DataNode's block scanner will be
- disabled. If this is positive, the DataNode will not scan any
+ If this is positive, the DataNode will not scan any
individual block more than once in the specified scan period.
+ If this is negative, the block scanner is disabled.
+ If this is set to zero, then the default value of 504 hours
+ or 3 weeks is used. Prior versions of HDFS incorrectly documented
+ that setting this key to zero will disable the block scanner.
</description>
</property>
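
In practice this means operators and tests that relied on 0 to disable scanning must switch to a negative value, as the TestFsDatasetImpl change further down does. A short usage sketch against org.apache.hadoop.conf.Configuration (assumes hadoop-common on the classpath; the wrapper class is illustrative):

import org.apache.hadoop.conf.Configuration;

public class DisableBlockScanner {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Under HDFS-8681, only a negative period disables the block scanner;
    // 0 now selects the 504-hour (3-week) default instead.
    conf.setLong("dfs.datanode.scan.period.hours", -1);
    System.out.println(conf.getLong("dfs.datanode.scan.period.hours", 504)); // -1
  }
}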
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 59c7ade..d03fa2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -274,6 +274,7 @@ public class TestFsDatasetImpl {
public void testChangeVolumeWithRunningCheckDirs() throws IOException {
RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
new RoundRobinVolumeChoosingPolicy<>();
+ conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
final BlockScanner blockScanner = new BlockScanner(datanode, conf);
final FsVolumeList volumeList = new FsVolumeList(
Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
[11/27] hadoop git commit: HDFS-8653. Code cleanup for DatanodeManager,
DatanodeDescriptor and DatanodeStorageInfo. Contributed by Zhe Zhang.
HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and DatanodeStorageInfo. Contributed by Zhe Zhang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ffd8427
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ffd8427
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ffd8427
Branch: refs/heads/HADOOP-12111
Commit: 2ffd84273ac490724fe7e7825664bb6d09ef0e99
Parents: d3fed8e
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jun 29 12:12:34 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jun 29 12:12:41 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../CacheReplicationMonitor.java | 4 +--
.../blockmanagement/DatanodeDescriptor.java | 33 ++++++++++----------
.../server/blockmanagement/DatanodeManager.java | 26 +++++++--------
.../blockmanagement/DatanodeStorageInfo.java | 13 ++++++--
5 files changed, 45 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e55f340..0c56f2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery
blocks. (Zhe Zhang via jing9)
+ HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
+ DatanodeStorageInfo. (Zhe Zhang via wang)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
index a0f3503..2f81ddf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
@@ -452,8 +452,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
file.getFullPathName(), cachedTotal, neededTotal);
}
- private String findReasonForNotCaching(CachedBlock cblock,
- BlockInfo blockInfo) {
+ private String findReasonForNotCaching(CachedBlock cblock,
+ BlockInfo blockInfo) {
if (blockInfo == null) {
// Somehow, a cache report with the block arrived, but the block
// reports from the DataNode haven't (yet?) described such a block.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index dd7b301..99def6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -64,7 +64,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
// Stores status of decommissioning.
// If node is not decommissioning, do not use this object for anything.
- public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+ public final DecommissioningStatus decommissioningStatus =
+ new DecommissioningStatus();
private long curBlockReportId = 0;
@@ -115,7 +116,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
return null;
}
- List<E> results = new ArrayList<E>();
+ List<E> results = new ArrayList<>();
for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
results.add(blockq.poll());
}
@@ -135,7 +136,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
private final Map<String, DatanodeStorageInfo> storageMap =
- new HashMap<String, DatanodeStorageInfo>();
+ new HashMap<>();
/**
* A list of CachedBlock objects on this datanode.
@@ -217,12 +218,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
private long bandwidth;
/** A queue of blocks to be replicated by this datanode */
- private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
+ private final BlockQueue<BlockTargetPair> replicateBlocks =
+ new BlockQueue<>();
/** A queue of blocks to be recovered by this datanode */
private final BlockQueue<BlockInfoUnderConstruction> recoverBlocks =
- new BlockQueue<BlockInfoUnderConstruction>();
+ new BlockQueue<>();
/** A set of blocks to be invalidated by this datanode */
- private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
+ private final LightWeightHashSet<Block> invalidateBlocks =
+ new LightWeightHashSet<>();
/* Variables for maintaining number of blocks scheduled to be written to
* this storage. This count is approximate and might be slightly bigger
@@ -230,9 +233,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
* while writing the block).
*/
private EnumCounters<StorageType> currApproxBlocksScheduled
- = new EnumCounters<StorageType>(StorageType.class);
+ = new EnumCounters<>(StorageType.class);
private EnumCounters<StorageType> prevApproxBlocksScheduled
- = new EnumCounters<StorageType>(StorageType.class);
+ = new EnumCounters<>(StorageType.class);
private long lastBlocksScheduledRollTime = 0;
private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
private int volumeFailures = 0;
@@ -276,6 +279,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
return storageMap.get(storageID);
}
}
+
DatanodeStorageInfo[] getStorageInfos() {
synchronized (storageMap) {
final Collection<DatanodeStorageInfo> storages = storageMap.values();
@@ -321,7 +325,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
Long.toHexString(curBlockReportId));
iter.remove();
if (zombies == null) {
- zombies = new LinkedList<DatanodeStorageInfo>();
+ zombies = new LinkedList<>();
}
zombies.add(storageInfo);
}
@@ -350,10 +354,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
*/
boolean removeBlock(String storageID, BlockInfo b) {
DatanodeStorageInfo s = getStorageInfo(storageID);
- if (s != null) {
- return s.removeBlock(b);
- }
- return false;
+ return s != null && s.removeBlock(b);
}
public void resetBlocks() {
@@ -449,7 +450,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
+ this.volumeFailures + " to " + volFailures);
synchronized (storageMap) {
failedStorageInfos =
- new HashSet<DatanodeStorageInfo>(storageMap.values());
+ new HashSet<>(storageMap.values());
}
}
@@ -505,7 +506,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
HashMap<String, DatanodeStorageInfo> excessStorages;
// Init excessStorages with all known storages.
- excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
+ excessStorages = new HashMap<>(storageMap);
// Remove storages that the DN reported in the heartbeat.
for (final StorageReport report : reports) {
@@ -542,7 +543,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
private final List<Iterator<BlockInfo>> iterators;
private BlockIterator(final DatanodeStorageInfo... storages) {
- List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
+ List<Iterator<BlockInfo>> iterators = new ArrayList<>();
for (DatanodeStorageInfo e : storages) {
iterators.add(e.getBlockIterator());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 8143fb4..4266004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -85,7 +85,7 @@ public class DatanodeManager {
* Mapping: StorageID -> DatanodeDescriptor
*/
private final Map<String, DatanodeDescriptor> datanodeMap
- = new HashMap<String, DatanodeDescriptor>();
+ = new HashMap<>();
/** Cluster network topology */
private final NetworkTopology networktopology;
@@ -162,7 +162,7 @@ public class DatanodeManager {
* Software version -> Number of datanodes with this version
*/
private HashMap<String, Integer> datanodesSoftwareVersions =
- new HashMap<String, Integer>(4, 0.75f);
+ new HashMap<>(4, 0.75f);
/**
* The minimum time between resending caching directives to Datanodes,
@@ -217,7 +217,7 @@ public class DatanodeManager {
// locations of those hosts in the include list and store the mapping
// in the cache; so future calls to resolve will be fast.
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
- final ArrayList<String> locations = new ArrayList<String>();
+ final ArrayList<String> locations = new ArrayList<>();
for (InetSocketAddress addr : hostFileManager.getIncludes()) {
locations.add(addr.getAddress().getHostAddress());
}
@@ -370,7 +370,7 @@ public class DatanodeManager {
// here we should get node but not datanode only .
Node client = getDatanodeByHost(targethost);
if (client == null) {
- List<String> hosts = new ArrayList<String> (1);
+ List<String> hosts = new ArrayList<> (1);
hosts.add(targethost);
List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
@@ -522,7 +522,7 @@ public class DatanodeManager {
void datanodeDump(final PrintWriter out) {
synchronized (datanodeMap) {
Map<String,DatanodeDescriptor> sortedDatanodeMap =
- new TreeMap<String,DatanodeDescriptor>(datanodeMap);
+ new TreeMap<>(datanodeMap);
out.println("Metasave: Number of datanodes: " + datanodeMap.size());
for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
out.println(node.dumpDatanode());
@@ -660,7 +660,7 @@ public class DatanodeManager {
private void countSoftwareVersions() {
synchronized(datanodeMap) {
- HashMap<String, Integer> versionCount = new HashMap<String, Integer>();
+ HashMap<String, Integer> versionCount = new HashMap<>();
for(DatanodeDescriptor dn: datanodeMap.values()) {
// Check isAlive too because right after removeDatanode(),
// isDatanodeDead() is still true
@@ -677,7 +677,7 @@ public class DatanodeManager {
public HashMap<String, Integer> getDatanodesSoftwareVersions() {
synchronized(datanodeMap) {
- return new HashMap<String, Integer> (this.datanodesSoftwareVersions);
+ return new HashMap<> (this.datanodesSoftwareVersions);
}
}
@@ -710,7 +710,7 @@ public class DatanodeManager {
*/
private String resolveNetworkLocation (DatanodeID node)
throws UnresolvedTopologyException {
- List<String> names = new ArrayList<String>(1);
+ List<String> names = new ArrayList<>(1);
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
names.add(node.getIpAddr());
} else {
@@ -1000,7 +1000,7 @@ public class DatanodeManager {
// If the network location is invalid, clear the cached mappings
// so that we have a chance to re-add this DataNode with the
// correct network location later.
- List<String> invalidNodeNames = new ArrayList<String>(3);
+ List<String> invalidNodeNames = new ArrayList<>(3);
// clear cache for nodes in IP or Hostname
invalidNodeNames.add(nodeReg.getIpAddr());
invalidNodeNames.add(nodeReg.getHostName());
@@ -1275,7 +1275,7 @@ public class DatanodeManager {
final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
synchronized(datanodeMap) {
- nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
+ nodes = new ArrayList<>(datanodeMap.size());
for (DatanodeDescriptor dn : datanodeMap.values()) {
final boolean isDead = isDatanodeDead(dn);
final boolean isDecommissioning = dn.isDecommissionInProgress();
@@ -1351,7 +1351,7 @@ public class DatanodeManager {
VolumeFailureSummary volumeFailureSummary) throws IOException {
synchronized (heartbeatManager) {
synchronized (datanodeMap) {
- DatanodeDescriptor nodeinfo = null;
+ DatanodeDescriptor nodeinfo;
try {
nodeinfo = getDatanode(nodeReg);
} catch(UnregisteredNodeException e) {
@@ -1389,7 +1389,7 @@ public class DatanodeManager {
final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
// Skip stale nodes during recovery - not heart beated for some time (30s by default).
final List<DatanodeStorageInfo> recoveryLocations =
- new ArrayList<DatanodeStorageInfo>(storages.length);
+ new ArrayList<>(storages.length);
for (int i = 0; i < storages.length; i++) {
if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) {
recoveryLocations.add(storages[i]);
@@ -1431,7 +1431,7 @@ public class DatanodeManager {
return new DatanodeCommand[] { brCommand };
}
- final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
+ final List<DatanodeCommand> cmds = new ArrayList<>();
//check pending replication
List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
maxTransfers);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 65b83e1..92841a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -37,8 +37,9 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
public class DatanodeStorageInfo {
public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
- public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
- return toDatanodeInfos(Arrays.asList(storages));
+ public static DatanodeInfo[] toDatanodeInfos(
+ DatanodeStorageInfo[] storages) {
+ return storages == null ? null: toDatanodeInfos(Arrays.asList(storages));
}
static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
@@ -58,6 +59,9 @@ public class DatanodeStorageInfo {
}
public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
+ if (storages == null) {
+ return null;
+ }
String[] storageIDs = new String[storages.length];
for(int i = 0; i < storageIDs.length; i++) {
storageIDs[i] = storages[i].getStorageID();
@@ -66,6 +70,9 @@ public class DatanodeStorageInfo {
}
public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
+ if (storages == null) {
+ return null;
+ }
StorageType[] storageTypes = new StorageType[storages.length];
for(int i = 0; i < storageTypes.length; i++) {
storageTypes[i] = storages[i].getStorageType();
@@ -380,6 +387,6 @@ public class DatanodeStorageInfo {
}
static enum AddBlockResult {
- ADDED, REPLACED, ALREADY_EXIST;
+ ADDED, REPLACED, ALREADY_EXIST
}
}
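
Most of the HDFS-8653 hunks are mechanical (diamond operator, line wrapping), but the DatanodeStorageInfo helpers also pick up a small behavioral change: toDatanodeInfos, toStorageIDs, and toStorageTypes now return null for a null input instead of throwing NullPointerException. A minimal standalone sketch of that null-propagation pattern (the String-based stand-in and names are illustrative):

public class NullPropagatingMappers {
  // Mirrors the cleaned-up helpers: null in -> null out, otherwise map
  // element-by-element into a fresh array.
  static String[] toStorageIDs(String[] storages) {
    if (storages == null) {
      return null; // callers no longer need a pre-check before mapping
    }
    String[] ids = new String[storages.length];
    for (int i = 0; i < storages.length; i++) {
      ids[i] = "id-" + storages[i]; // stand-in for storages[i].getStorageID()
    }
    return ids;
  }

  public static void main(String[] args) {
    System.out.println(toStorageIDs(null));                    // null
    String[] ids = toStorageIDs(new String[] {"s1", "s2"});
    System.out.println(java.util.Arrays.toString(ids));        // [id-s1, id-s2]
  }
}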