Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2013/01/11 20:40:34 UTC
svn commit: r1432246 - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: hadoop-hdfs-httpfs/
hadoop-hdfs/
hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/
hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/a...
Author: suresh
Date: Fri Jan 11 19:40:23 2013
New Revision: 1432246
URL: http://svn.apache.org/viewvc?rev=1432246&view=rev
Log:
Merge r1414455:r1426018 from trunk
Added:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/
- copied from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfs-logo.jpg
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfs-logo.jpg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.gif
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.gif
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.odg
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.odg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.png
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.png
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.gif
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.gif
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.odg
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.odg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.png
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.png
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-forward.jpg
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-forward.jpg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-overview.jpg
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-overview.jpg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-server.jpg
- copied unchanged from r1426018, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-server.jpg
Removed:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_design.xml
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfs-logo.jpg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsarchitecture.gif
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsarchitecture.odg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsarchitecture.png
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsdatanodes.gif
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsdatanodes.odg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsdatanodes.png
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsproxy-forward.jpg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsproxy-overview.jpg
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsproxy-server.jpg
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1419191-1426018
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml Fri Jan 11 19:40:23 2013
@@ -34,7 +34,7 @@
<description>Apache Hadoop HttpFS</description>
<properties>
- <tomcat.version>6.0.32</tomcat.version>
+ <tomcat.version>6.0.36</tomcat.version>
<httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
<httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
@@ -531,7 +531,7 @@
<mkdir dir="downloads"/>
<get
src="${tomcat.download.url}"
- dest="downloads/tomcat.tar.gz" verbose="true" skipexisting="true"/>
+ dest="downloads/apache-tomcat-${tomcat.version}.tar.gz" verbose="true" skipexisting="true"/>
<delete dir="${project.build.directory}/tomcat.exp"/>
<mkdir dir="${project.build.directory}/tomcat.exp"/>
@@ -545,7 +545,7 @@
BUILD_DIR=`cygpath --unix '${project.build.directory}'`
fi
cd $BUILD_DIR/tomcat.exp
- tar xzf ${basedir}/downloads/tomcat.tar.gz
+ tar xzf ${basedir}/downloads/apache-tomcat-${tomcat.version}.tar.gz
</echo>
<exec executable="sh" dir="${project.build.directory}" failonerror="true">
<arg line="./tomcat-untar.sh"/>
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jan 11 19:40:23 2013
@@ -20,6 +20,8 @@ Trunk (Unreleased)
HDFS-3495. Update Balancer to support new NetworkTopology with NodeGroup.
(Junping Du via szetszwo)
+ HDFS-4296. Reserve layout version for release 1.2.0. (suresh)
+
IMPROVEMENTS
HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -269,6 +271,21 @@ Trunk (Unreleased)
HDFS-4260 Fix HDFS tests to set test dir to a valid HDFS path as opposed
to the local build path (Chris Nauroth via Sanjay)
+ HDFS-4269. Datanode rejects all datanode registrations from localhost
+ in single-node developer setup on Windows. (Chris Nauroth via suresh)
+
+ HADOOP-8957 HDFS tests for AbstractFileSystem#IsValidName should be overridden for
+ embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia)
+
+ HDFS-4310. fix test org.apache.hadoop.hdfs.server.datanode.
+ TestStartSecureDataNode (Ivan A. Veselovsky via atm)
+
+ HDFS-4274. BlockPoolSliceScanner does not close verification log during
+ shutdown. (Chris Nauroth via suresh)
+
+ HDFS-4275. MiniDFSCluster-based tests fail on Windows due to failure
+ to delete test namenode directory. (Chris Nauroth via suresh)
+
Release 2.0.3-alpha - Unreleased
INCOMPATIBLE CHANGES
@@ -410,6 +427,11 @@ Release 2.0.3-alpha - Unreleased
HDFS-3680. Allow customized audit logging in HDFS FSNamesystem. (Marcelo
Vanzin via atm)
+ HDFS-4130. BKJM: The reading for editlog at NN starting using bkjm is not efficient.
+ (Han Xiao via umamahesh)
+
+ HDFS-4326. bump up Tomcat version for HttpFS to 6.0.36. (tucu via acmurthy)
+
OPTIMIZATIONS
BUG FIXES
@@ -581,6 +603,30 @@ Release 2.0.3-alpha - Unreleased
HDFS-4279. NameNode does not initialize generic conf keys when started
with -recover. (Colin Patrick McCabe via atm)
+ HDFS-4291. edit log unit tests leave stray test_edit_log_file around
+ (Colin Patrick McCabe via todd)
+
+ HDFS-4292. Sanity check not correct in RemoteBlockReader2.newBlockReader
+ (Binglin Chang via todd)
+
+ HDFS-4295. Using port 1023 should be valid when starting Secure DataNode
+ (Stephen Chu via todd)
+
+ HDFS-4294. Backwards compatibility is not maintained for TestVolumeId.
+ (Ivan A. Veselovsky and Robert Parker via atm)
+
+ HDFS-2264. NamenodeProtocol has the wrong value for clientPrincipal in
+ KerberosInfo annotation. (atm)
+
+ HDFS-4307. SocketCache should use monotonic time. (Colin Patrick McCabe
+ via atm)
+
+ HDFS-4315. DNs with multiple BPs can have BPOfferServices fail to start
+ due to unsynchronized map access. (atm)
+
+ HDFS-4140. fuse-dfs handles open(O_TRUNC) poorly. (Colin Patrick McCabe
+ via atm)
+
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java Fri Jan 11 19:40:23 2013
@@ -500,16 +500,18 @@ public class BookKeeperJournalManager im
}
}
- EditLogInputStream getInputStream(long fromTxId, boolean inProgressOk)
- throws IOException {
- for (EditLogLedgerMetadata l : getLedgerList(inProgressOk)) {
- long lastTxId = l.getLastTxId();
- if (l.isInProgress()) {
- lastTxId = recoverLastTxId(l, false);
- }
-
- if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
- try {
+ @Override
+ public void selectInputStreams(Collection<EditLogInputStream> streams,
+ long fromTxId, boolean inProgressOk) throws IOException {
+ List<EditLogLedgerMetadata> currentLedgerList = getLedgerList(inProgressOk);
+ try {
+ BookKeeperEditLogInputStream elis = null;
+ for (EditLogLedgerMetadata l : currentLedgerList) {
+ long lastTxId = l.getLastTxId();
+ if (l.isInProgress()) {
+ lastTxId = recoverLastTxId(l, false);
+ }
+ if (fromTxId >= l.getFirstTxId() && fromTxId <= lastTxId) {
LedgerHandle h;
if (l.isInProgress()) { // we don't want to fence the current journal
h = bkc.openLedgerNoRecovery(l.getLedgerId(),
@@ -518,42 +520,22 @@ public class BookKeeperJournalManager im
h = bkc.openLedger(l.getLedgerId(), BookKeeper.DigestType.MAC,
digestpw.getBytes());
}
- BookKeeperEditLogInputStream s = new BookKeeperEditLogInputStream(h,
- l);
- s.skipTo(fromTxId);
- return s;
- } catch (BKException e) {
- throw new IOException("Could not open ledger for " + fromTxId, e);
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- throw new IOException("Interrupted opening ledger for "
- + fromTxId, ie);
+ elis = new BookKeeperEditLogInputStream(h, l);
+ elis.skipTo(fromTxId);
+ } else {
+ return;
}
+ streams.add(elis);
+ if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
+ return;
+ }
+ fromTxId = elis.getLastTxId() + 1;
}
- }
- return null;
- }
-
- @Override
- public void selectInputStreams(Collection<EditLogInputStream> streams,
- long fromTxId, boolean inProgressOk) {
- // NOTE: could probably be rewritten more efficiently
- while (true) {
- EditLogInputStream elis;
- try {
- elis = getInputStream(fromTxId, inProgressOk);
- } catch (IOException e) {
- LOG.error(e);
- return;
- }
- if (elis == null) {
- return;
- }
- streams.add(elis);
- if (elis.getLastTxId() == HdfsConstants.INVALID_TXID) {
- return;
- }
- fromTxId = elis.getLastTxId() + 1;
+ } catch (BKException e) {
+ throw new IOException("Could not open ledger for " + fromTxId, e);
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ throw new IOException("Interrupted opening ledger for " + fromTxId, ie);
}
}
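A note on the new contract above: selectInputStreams appends one BookKeeperEditLogInputStream per contiguous ledger segment to a caller-supplied collection instead of handing back a single stream per call, and it now throws IOException directly rather than logging and silently returning fewer streams. A minimal caller sketch (hedged; assumes an already-initialized BookKeeperJournalManager named bkjm, and mirrors the test update that follows):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;

    List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
    bkjm.selectInputStreams(streams, 1, true); // from txid 1, in-progress ok
    try {
      for (EditLogInputStream elis : streams) {
        // consume edits; streams arrive in contiguous txid order
      }
    } finally {
      for (EditLogInputStream elis : streams) {
        elis.close();
      }
    }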
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java Fri Jan 11 19:40:23 2013
@@ -28,6 +28,7 @@ import org.mockito.Mockito;
import java.io.IOException;
import java.net.URI;
+import java.util.ArrayList;
import java.util.List;
import java.util.ArrayList;
import java.util.Random;
@@ -315,13 +316,13 @@ public class TestBookKeeperJournalManage
out.close();
bkjm.finalizeLogSegment(1, numTransactions);
-
- EditLogInputStream in = bkjm.getInputStream(1, true);
+ List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
+ bkjm.selectInputStreams(in, 1, true);
try {
assertEquals(numTransactions,
- FSEditLogTestUtil.countTransactionsInStream(in));
+ FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
} finally {
- in.close();
+ in.get(0).close();
}
}
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1419191-1426018
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java Fri Jan 11 19:40:23 2013
@@ -404,7 +404,7 @@ public class RemoteBlockReader extends F
long firstChunkOffset = checksumInfo.getChunkOffset();
if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
- firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+ firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
throw new IOException("BlockReader: error in first chunk offset (" +
firstChunkOffset + ") startOffset is " +
startOffset + " for file " + file);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java Fri Jan 11 19:40:23 2013
@@ -413,7 +413,7 @@ public class RemoteBlockReader2 impleme
long firstChunkOffset = checksumInfo.getChunkOffset();
if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
- firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+ firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
throw new IOException("BlockReader: error in first chunk offset (" +
firstChunkOffset + ") startOffset is " +
startOffset + " for file " + file);
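The two one-line fixes above (HDFS-4292) tighten the same sanity check in both block readers. The old third clause, firstChunkOffset >= startOffset + bytesPerChecksum, was unreachable: the preceding firstChunkOffset > startOffset test already rejects anything that large. The corrected clause instead rejects a first chunk that begins more than one checksum chunk before the requested offset. A hedged restatement of the invariant (not the actual HDFS method):

    static boolean isFirstChunkOffsetSane(long firstChunkOffset,
        long startOffset, int bytesPerChecksum) {
      // Valid: 0 <= firstChunkOffset <= startOffset, and no more than one
      // checksum chunk before startOffset.
      return firstChunkOffset >= 0
          && firstChunkOffset <= startOffset
          && firstChunkOffset > startOffset - bytesPerChecksum;
    }
    // Example: startOffset = 1000, bytesPerChecksum = 512. The enclosing
    // chunk starts at 512, which is accepted (488 < 512 <= 1000); a bogus
    // offset of 0 is now rejected as more than one chunk too early.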
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java Fri Jan 11 19:40:23 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.d
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
/**
* A cache of input stream sockets to Data Node.
@@ -53,7 +54,7 @@ class SocketCache {
public SocketAndStreams(Socket s, IOStreamPair ioStreams) {
this.sock = s;
this.ioStreams = ioStreams;
- this.createTime = System.currentTimeMillis();
+ this.createTime = Time.monotonicNow();
}
@Override
@@ -205,7 +206,7 @@ class SocketCache {
Entry<SocketAddress, SocketAndStreams> entry = iter.next();
// if oldest socket expired, remove it
if (entry == null ||
- System.currentTimeMillis() - entry.getValue().getCreateTime() <
+ Time.monotonicNow() - entry.getValue().getCreateTime() <
expiryPeriod) {
break;
}
@@ -236,13 +237,13 @@ class SocketCache {
* older than expiryPeriod minutes
*/
private void run() throws InterruptedException {
- for(long lastExpiryTime = System.currentTimeMillis();
+ for(long lastExpiryTime = Time.monotonicNow();
!Thread.interrupted();
Thread.sleep(expiryPeriod)) {
- final long elapsed = System.currentTimeMillis() - lastExpiryTime;
+ final long elapsed = Time.monotonicNow() - lastExpiryTime;
if (elapsed >= expiryPeriod) {
evictExpired(expiryPeriod);
- lastExpiryTime = System.currentTimeMillis();
+ lastExpiryTime = Time.monotonicNow();
}
}
clear();
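The SocketCache change (HDFS-4307) swaps wall-clock time for a monotonic clock in all expiry arithmetic. System.currentTimeMillis() can jump forward or backward when the system clock is stepped (for example by NTP), which can make cached sockets look far older or newer than they are; Time.monotonicNow() only moves forward. The pattern in isolation:

    import org.apache.hadoop.util.Time;

    long createTime = Time.monotonicNow();
    // ... later, on the eviction path ...
    long expiryPeriod = 3000; // ms; illustrative value only
    // Correct even if the wall clock was stepped in between.
    boolean expired = (Time.monotonicNow() - createTime) >= expiryPeriod;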
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java Fri Jan 11 19:40:23 2013
@@ -82,10 +82,11 @@ public class LayoutVersion {
EDITS_CHESKUM(-28, "Support checksum for editlog"),
UNUSED(-29, "Skipped version"),
FSIMAGE_NAME_OPTIMIZATION(-30, "Store only last part of path in fsimage"),
- RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203"),
- RESERVED_REL20_204(-32, "Reserved for release 0.20.204"),
- RESERVED_REL22(-33, -27, "Reserved for release 0.22"),
- RESERVED_REL23(-34, -30, "Reserved for release 0.23"),
+ RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203", true,
+ DELEGATION_TOKEN),
+ RESERVED_REL20_204(-32, -31, "Reserved for release 0.20.204", true),
+ RESERVED_REL22(-33, -27, "Reserved for release 0.22", true),
+ RESERVED_REL23(-34, -30, "Reserved for release 0.23", true),
FEDERATION(-35, "Support for namenode federation"),
LEASE_REASSIGNMENT(-36, "Support for persisting lease holder reassignment"),
STORED_TXIDS(-37, "Transaction IDs are stored in edits log and image files"),
@@ -95,33 +96,40 @@ public class LayoutVersion {
OPTIMIZE_PERSIST_BLOCKS(-40,
"Serialize block lists with delta-encoded variable length ints, " +
"add OP_UPDATE_BLOCKS"),
- SNAPSHOT(-41, "Support for snapshot feature");
+ RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT),
+ SNAPSHOT(-41, -39, "Support for snapshot feature", false);
final int lv;
final int ancestorLV;
final String description;
+ final boolean reserved;
+ final Feature[] specialFeatures;
/**
- * Feature that is added at {@code currentLV}.
+ * Feature that is added at layout version {@code lv} - 1.
* @param lv new layout version with the addition of this feature
* @param description description of the feature
*/
Feature(final int lv, final String description) {
- this(lv, lv + 1, description);
+ this(lv, lv + 1, description, false);
}
/**
- * Feature that is added at {@code currentLV}.
+ * Feature that is added at layout version {@code ancestorLV}.
* @param lv new layout version with the addition of this feature
- * @param ancestorLV layout version from which the new lv is derived
- * from.
+ * @param ancestorLV layout version from which the new lv is derived from.
* @param description description of the feature
+ * @param reserved true when this is a layout version reserved for previous
+ * versions
+ * @param features set of features that are to be enabled for this version
*/
- Feature(final int lv, final int ancestorLV,
- final String description) {
+ Feature(final int lv, final int ancestorLV, final String description,
+ boolean reserved, Feature... features) {
this.lv = lv;
this.ancestorLV = ancestorLV;
this.description = description;
+ this.reserved = reserved;
+ specialFeatures = features;
}
/**
@@ -147,6 +155,10 @@ public class LayoutVersion {
public String getDescription() {
return description;
}
+
+ public boolean isReservedForOldRelease() {
+ return reserved;
+ }
}
// Build layout version and corresponding feature matrix
@@ -172,19 +184,14 @@ public class LayoutVersion {
map.put(f.ancestorLV, ancestorSet);
}
EnumSet<Feature> featureSet = EnumSet.copyOf(ancestorSet);
+ if (f.specialFeatures != null) {
+ for (Feature specialFeature : f.specialFeatures) {
+ featureSet.add(specialFeature);
+ }
+ }
featureSet.add(f);
map.put(f.lv, featureSet);
}
-
- // Special initialization for 0.20.203 and 0.20.204
- // to add Feature#DELEGATION_TOKEN
- specialInit(Feature.RESERVED_REL20_203.lv, Feature.DELEGATION_TOKEN);
- specialInit(Feature.RESERVED_REL20_204.lv, Feature.DELEGATION_TOKEN);
- }
-
- private static void specialInit(int lv, Feature f) {
- EnumSet<Feature> set = map.get(lv);
- set.add(f);
}
/**
@@ -223,6 +230,11 @@ public class LayoutVersion {
*/
public static int getCurrentLayoutVersion() {
Feature[] values = Feature.values();
- return values[values.length - 1].lv;
+ for (int i = values.length -1; i >= 0; i--) {
+ if (!values[i].isReservedForOldRelease()) {
+ return values[i].lv;
+ }
+ }
+ throw new AssertionError("All layout versions are reserved.");
}
}
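Two things happen in the LayoutVersion change: reserved release versions (HDFS-4296 reserves a version for 1.2.0) now carry their extra features through the constructor varargs, replacing the removed specialInit() hack, and getCurrentLayoutVersion() scans backward past reserved entries instead of blindly taking the last enum constant. A hedged usage sketch, using the existing public query methods:

    // DELEGATION_TOKEN is now attached to reserved version -31 (0.20.203)
    // via the constructor rather than specialInit().
    boolean hasToken = LayoutVersion.supports(
        LayoutVersion.Feature.DELEGATION_TOKEN, -31);  // expected: true

    // The "current" version skips trailing reserved entries such as
    // RESERVED_REL1_2_0 and returns the last non-reserved Feature's lv.
    int current = LayoutVersion.getCurrentLayoutVersion();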
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Fri Jan 11 19:40:23 2013
@@ -633,7 +633,9 @@ public class DatanodeManager {
// Mostly called inside an RPC, update ip and peer hostname
String hostname = dnAddress.getHostName();
String ip = dnAddress.getHostAddress();
- if (hostname.equals(ip)) {
+ if (!isNameResolved(dnAddress)) {
+ // Reject registration of unresolved datanode to prevent performance
+ // impact of repetitive DNS lookups later.
LOG.warn("Unresolved datanode registration from " + ip);
throw new DisallowedDatanodeException(nodeReg);
}
@@ -1061,6 +1063,22 @@ public class DatanodeManager {
}
return names;
}
+
+ /**
+ * Checks if name resolution was successful for the given address. If IP
+ * address and host name are the same, then it means name resolution has
+ * failed. As a special case, the loopback address is also considered
+ * acceptable. This is particularly important on Windows, where 127.0.0.1 does
+ * not resolve to "localhost".
+ *
+ * @param address InetAddress to check
+ * @return boolean true if name resolution successful or address is loopback
+ */
+ private static boolean isNameResolved(InetAddress address) {
+ String hostname = address.getHostName();
+ String ip = address.getHostAddress();
+ return !hostname.equals(ip) || address.isLoopbackAddress();
+ }
private void setDatanodeDead(DatanodeDescriptor node) {
node.setLastUpdate(0);
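Context for the DatanodeManager fix (HDFS-4269): InetAddress.getHostName() returns the literal IP string when reverse DNS fails, so hostname.equals(ip) is the signal for an unresolved address. The loopback special case matters on Windows, where 127.0.0.1 may not resolve to "localhost" and single-node registrations were being rejected. The same test as a standalone sketch:

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class ResolutionCheck {
      static boolean isNameResolved(InetAddress address) {
        // Unresolved addresses echo the IP back as the "hostname".
        String hostname = address.getHostName();
        String ip = address.getHostAddress();
        return !hostname.equals(ip) || address.isLoopbackAddress();
      }

      public static void main(String[] args) throws UnknownHostException {
        // Loopback passes even where reverse DNS is unavailable.
        System.out.println(
            isNameResolved(InetAddress.getByName("127.0.0.1"))); // true
      }
    }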
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Fri Jan 11 19:40:23 2013
@@ -602,6 +602,15 @@ class BlockPoolSliceScanner {
lastScanTime.set(Time.now());
}
}
+
+ /**
+ * Shuts down this BlockPoolSliceScanner and releases any internal resources.
+ */
+ void shutdown() {
+ if (verificationLog != null) {
+ verificationLog.close();
+ }
+ }
private void scan() {
if (LOG.isDebugEnabled()) {
@@ -610,7 +619,8 @@ class BlockPoolSliceScanner {
try {
adjustThrottler();
- while (datanode.shouldRun && !Thread.interrupted()
+ while (datanode.shouldRun
+ && !datanode.blockScanner.blockScannerThread.isInterrupted()
&& datanode.isBPServiceAlive(blockPoolId)) {
long now = Time.now();
synchronized (this) {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Fri Jan 11 19:40:23 2013
@@ -100,6 +100,11 @@ public class DataBlockScanner implements
}
bpScanner.scanBlockPoolSlice();
}
+
+ // Call shutdown for each allocated BlockPoolSliceScanner.
+ for (BlockPoolSliceScanner bpss: blockPoolScannerMap.values()) {
+ bpss.shutdown();
+ }
}
// Wait for at least one block pool to be up
@@ -232,9 +237,21 @@ public class DataBlockScanner implements
}
}
- public synchronized void shutdown() {
+ public void shutdown() {
+ synchronized (this) {
+ if (blockScannerThread != null) {
+ blockScannerThread.interrupt();
+ }
+ }
+
+ // We cannot join within the synchronized block, because it would create a
+ // deadlock situation. blockScannerThread calls other synchronized methods.
if (blockScannerThread != null) {
- blockScannerThread.interrupt();
+ try {
+ blockScannerThread.join();
+ } catch (InterruptedException e) {
+ // shutting down anyway
+ }
}
}
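The DataBlockScanner.shutdown() rewrite is an instance of a general pattern: interrupt the worker while holding the lock, then join() outside it, because the worker may itself need to enter synchronized methods on this object while winding down; joining under the lock would deadlock. The pattern in isolation (field name hypothetical):

    public void shutdown() {
      Thread worker;
      synchronized (this) {
        worker = workerThread;   // hypothetical field holding the scan thread
        if (worker != null) {
          worker.interrupt();    // signal under the lock
        }
      }
      if (worker != null) {
        try {
          worker.join();         // but wait outside the lock
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // shutting down anyway
        }
      }
    }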
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Jan 11 19:40:23 2013
@@ -26,6 +26,7 @@ import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
@@ -78,7 +79,7 @@ public class DataStorage extends Storage
// BlockPoolStorage is map of <Block pool Id, BlockPoolStorage>
private Map<String, BlockPoolSliceStorage> bpStorageMap
- = new HashMap<String, BlockPoolSliceStorage>();
+ = Collections.synchronizedMap(new HashMap<String, BlockPoolSliceStorage>());
DataStorage() {
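On the DataStorage fix (HDFS-4315): Collections.synchronizedMap makes individual get/put calls safe when multiple BPOfferService threads touch the map, but the java.util.Collections contract still requires an explicit synchronized block on the wrapper for any iteration. A brief sketch (bpid and bpStorage are placeholder variables):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    Map<String, BlockPoolSliceStorage> bpStorageMap =
        Collections.synchronizedMap(
            new HashMap<String, BlockPoolSliceStorage>());

    bpStorageMap.put(bpid, bpStorage);  // safe without extra locking
    synchronized (bpStorageMap) {       // required when iterating
      for (BlockPoolSliceStorage s : bpStorageMap.values()) {
        // ...
      }
    }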
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java Fri Jan 11 19:40:23 2013
@@ -140,7 +140,7 @@ public class SecureDataNodeStarter imple
System.err.println("Successfully obtained privileged resources (streaming port = "
+ ss + " ) (http listener port = " + listener.getConnection() +")");
- if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) &&
+ if ((ss.getLocalPort() > 1023 || listener.getPort() > 1023) &&
UserGroupInformation.isSecurityEnabled()) {
throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
}
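The SecureDataNodeStarter fix (HDFS-4295) is an off-by-one: privileged ports run from 1 to 1023 inclusive, so a secure DataNode binding port 1023 is legitimate, and only ports strictly above 1023 should trip the check. The corrected predicate in isolation (not the actual method):

    static boolean isUnprivilegedPort(int port) {
      return port > 1023;
    }
    // isUnprivilegedPort(1023) -> false: a secure DataNode may bind it
    // isUnprivilegedPort(1024) -> true : rejected when security is enabled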
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Fri Jan 11 19:40:23 2013
@@ -339,11 +339,13 @@ class NameNodeRpcServer implements Namen
"Unexpected not positive size: "+size);
}
namesystem.checkOperation(OperationCategory.READ);
+ namesystem.checkSuperuserPrivilege();
return namesystem.getBlockManager().getBlocks(datanode, size);
}
@Override // NamenodeProtocol
public ExportedBlockKeys getBlockKeys() throws IOException {
+ namesystem.checkSuperuserPrivilege();
return namesystem.getBlockManager().getBlockKeys();
}
@@ -352,6 +354,7 @@ class NameNodeRpcServer implements Namen
int errorCode,
String msg) throws IOException {
namesystem.checkOperation(OperationCategory.UNCHECKED);
+ namesystem.checkSuperuserPrivilege();
verifyRequest(registration);
LOG.info("Error report from " + registration + ": " + msg);
if (errorCode == FATAL) {
@@ -362,6 +365,7 @@ class NameNodeRpcServer implements Namen
@Override // NamenodeProtocol
public NamenodeRegistration register(NamenodeRegistration registration)
throws IOException {
+ namesystem.checkSuperuserPrivilege();
verifyLayoutVersion(registration.getVersion());
NamenodeRegistration myRegistration = nn.setRegistration();
namesystem.registerBackupNode(registration, myRegistration);
@@ -371,6 +375,7 @@ class NameNodeRpcServer implements Namen
@Override // NamenodeProtocol
public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
throws IOException {
+ namesystem.checkSuperuserPrivilege();
verifyRequest(registration);
if(!nn.isRole(NamenodeRole.NAMENODE))
throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
@@ -380,6 +385,7 @@ class NameNodeRpcServer implements Namen
@Override // NamenodeProtocol
public void endCheckpoint(NamenodeRegistration registration,
CheckpointSignature sig) throws IOException {
+ namesystem.checkSuperuserPrivilege();
namesystem.endCheckpoint(registration, sig);
}
@@ -756,17 +762,20 @@ class NameNodeRpcServer implements Namen
@Override // NamenodeProtocol
public long getTransactionID() throws IOException {
namesystem.checkOperation(OperationCategory.UNCHECKED);
+ namesystem.checkSuperuserPrivilege();
return namesystem.getFSImage().getLastAppliedOrWrittenTxId();
}
@Override // NamenodeProtocol
public long getMostRecentCheckpointTxId() throws IOException {
namesystem.checkOperation(OperationCategory.UNCHECKED);
+ namesystem.checkSuperuserPrivilege();
return namesystem.getFSImage().getMostRecentCheckpointTxId();
}
@Override // NamenodeProtocol
public CheckpointSignature rollEditLog() throws IOException {
+ namesystem.checkSuperuserPrivilege();
return namesystem.rollEditLog();
}
@@ -774,6 +783,7 @@ class NameNodeRpcServer implements Namen
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
throws IOException {
namesystem.checkOperation(OperationCategory.READ);
+ namesystem.checkSuperuserPrivilege();
return namesystem.getEditLog().getEditLogManifest(sinceTxId);
}
@@ -950,6 +960,7 @@ class NameNodeRpcServer implements Namen
@Override // DatanodeProtocol, NamenodeProtocol
public NamespaceInfo versionRequest() throws IOException {
+ namesystem.checkSuperuserPrivilege();
return namesystem.getNamespaceInfo();
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Fri Jan 11 19:40:23 2013
@@ -32,8 +32,7 @@ import org.apache.hadoop.security.Kerber
* It's used to get part of the name node state
*****************************************************************************/
@KerberosInfo(
- serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
- clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+ serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
@InterfaceAudience.Private
public interface NamenodeProtocol {
/**
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1419191-1426018
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c Fri Jan 11 19:40:23 2013
@@ -131,7 +131,6 @@ static enum authConf discoverAuthConf(vo
int fuseConnectInit(const char *nnUri, int port)
{
- const char *timerPeriod;
int ret;
gTimerPeriod = FUSE_CONN_DEFAULT_TIMER_PERIOD;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c Fri Jan 11 19:40:23 2013
@@ -24,12 +24,77 @@
#include <stdio.h>
#include <stdlib.h>
+static int get_hdfs_open_flags_from_info(hdfsFS fs, const char *path,
+ int flags, int *outflags, const hdfsFileInfo *info);
+
+/**
+ * Given a set of FUSE flags, determine the libhdfs flags we need.
+ *
+ * This is complicated by two things:
+ * 1. libhdfs doesn't support O_RDWR at all;
+ * 2. when given O_WRONLY, libhdfs will truncate the file unless O_APPEND is
+ * also given. In other words, there is an implicit O_TRUNC.
+ *
+ * Probably the next iteration of the libhdfs interface should not use the POSIX
+ * flags at all, since, as you can see, they don't really match up very closely
+ * to the POSIX meaning. However, for the time being, this is the API.
+ *
+ * @param fs The libhdfs object
+ * @param path The path we're opening
+ * @param flags The FUSE flags
+ *
+ * @return negative error code on failure; flags otherwise.
+ */
+static int64_t get_hdfs_open_flags(hdfsFS fs, const char *path, int flags)
+{
+ int hasContent;
+ int64_t ret;
+ hdfsFileInfo *info;
+
+ if ((flags & O_ACCMODE) == O_RDONLY) {
+ return O_RDONLY;
+ }
+ if (flags & O_TRUNC) {
+ /* If we're opening for write or read/write, O_TRUNC means we should blow
+ * away the file which is there and create our own file.
+ * */
+ return O_WRONLY;
+ }
+ info = hdfsGetPathInfo(fs, path);
+ if (info) {
+ if (info->mSize == 0) {
+ // If the file has zero length, we shouldn't feel bad about blowing it
+ // away.
+ ret = O_WRONLY;
+ } else if ((flags & O_ACCMODE) == O_RDWR) {
+ // HACK: translate O_RDWR requests into O_RDONLY if the file already
+ // exists and has non-zero length.
+ ret = O_RDONLY;
+ } else { // O_WRONLY
+ // HACK: translate O_WRONLY requests into append if the file already
+ // exists.
+ ret = O_WRONLY | O_APPEND;
+ }
+ } else { // !info
+ if (flags & O_CREAT) {
+ ret = O_WRONLY;
+ } else {
+ ret = -ENOENT;
+ }
+ }
+ if (info) {
+ hdfsFreeFileInfo(info, 1);
+ }
+ return ret;
+}
+
int dfs_open(const char *path, struct fuse_file_info *fi)
{
hdfsFS fs = NULL;
dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
dfs_fh *fh = NULL;
- int mutexInit = 0, ret;
+ int mutexInit = 0, ret, flags = 0;
+ int64_t flagRet;
TRACE1("open", path)
@@ -38,10 +103,6 @@ int dfs_open(const char *path, struct fu
assert('/' == *path);
assert(dfs);
- // 0x8000 is always passed in and hadoop doesn't like it, so killing it here
- // bugbug figure out what this flag is and report problem to Hadoop JIRA
- int flags = (fi->flags & 0x7FFF);
-
// retrieve dfs specific data
fh = (dfs_fh*)calloc(1, sizeof (dfs_fh));
if (!fh) {
@@ -57,22 +118,12 @@ int dfs_open(const char *path, struct fu
goto error;
}
fs = hdfsConnGetFs(fh->conn);
-
- if (flags & O_RDWR) {
- hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
- if (info == NULL) {
- // File does not exist (maybe?); interpret it as a O_WRONLY
- // If the actual error was something else, we'll get it again when
- // we try to open the file.
- flags ^= O_RDWR;
- flags |= O_WRONLY;
- } else {
- // File exists; open this as read only.
- flags ^= O_RDWR;
- flags |= O_RDONLY;
- }
+ flagRet = get_hdfs_open_flags(fs, path, fi->flags);
+ if (flagRet < 0) {
+ ret = -flagRet;
+ goto error;
}
-
+ flags = flagRet;
if ((fh->hdfsFH = hdfsOpenFile(fs, path, flags, 0, 0, 0)) == NULL) {
ERROR("Could not open file %s (errno=%d)", path, errno);
if (errno == 0 || errno == EINTERNAL) {
@@ -91,7 +142,7 @@ int dfs_open(const char *path, struct fu
}
mutexInit = 1;
- if (fi->flags & O_WRONLY || fi->flags & O_CREAT) {
+ if ((flags & O_ACCMODE) == O_WRONLY) {
fh->buf = NULL;
} else {
assert(dfs->rdbuffer_size > 0);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c Fri Jan 11 19:40:23 2013
@@ -98,7 +98,7 @@ static void dfsPrintOptions(FILE *fp, co
o->attribute_timeout, o->rdbuffer_size, o->direct_io);
}
-void *dfs_init(void)
+void *dfs_init(struct fuse_conn_info *conn)
{
int ret;
@@ -143,6 +143,45 @@ void *dfs_init(void)
exit(EXIT_FAILURE);
}
}
+
+#ifdef FUSE_CAP_ATOMIC_O_TRUNC
+ // If FUSE_CAP_ATOMIC_O_TRUNC is set, open("foo", O_CREAT | O_TRUNC) will
+ // result in dfs_open being called with O_TRUNC.
+ //
+ // If this capability is not present, fuse will try to use multiple
+ // operations to "simulate" open(O_TRUNC). This doesn't work very well with
+ // HDFS.
+ // Unfortunately, this capability is only implemented on Linux 2.6.29 or so.
+ // See HDFS-4140 for details.
+ if (conn->capable & FUSE_CAP_ATOMIC_O_TRUNC) {
+ conn->want |= FUSE_CAP_ATOMIC_O_TRUNC;
+ }
+#endif
+
+#ifdef FUSE_CAP_ASYNC_READ
+ // We're OK with doing reads at the same time as writes.
+ if (conn->capable & FUSE_CAP_ASYNC_READ) {
+ conn->want |= FUSE_CAP_ASYNC_READ;
+ }
+#endif
+
+#ifdef FUSE_CAP_BIG_WRITES
+ // Yes, we can read more than 4kb at a time. In fact, please do!
+ if (conn->capable & FUSE_CAP_BIG_WRITES) {
+ conn->want |= FUSE_CAP_BIG_WRITES;
+ }
+#endif
+
+#ifdef FUSE_CAP_DONT_MASK
+ if ((options.no_permissions) && (conn->capable & FUSE_CAP_DONT_MASK)) {
+ // If we're handling permissions ourselves, we don't want the kernel
+ // applying its own umask. HDFS already implements its own per-user
+ // umasks! Sadly, this only actually does something on kernels 2.6.31 and
+ // later.
+ conn->want |= FUSE_CAP_DONT_MASK;
+ }
+#endif
+
return (void*)dfs;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h Fri Jan 11 19:40:23 2013
@@ -19,13 +19,15 @@
#ifndef __FUSE_INIT_H__
#define __FUSE_INIT_H__
+struct fuse_conn_info;
+
/**
* These are responsible for initializing connections to dfs and internal
* data structures and then freeing them.
* i.e., what happens on mount and unmount.
*
*/
-void *dfs_init();
+void *dfs_init(struct fuse_conn_info *conn);
void dfs_destroy (void *ptr);
#endif
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c Fri Jan 11 19:40:23 2013
@@ -16,6 +16,8 @@
* limitations under the License.
*/
+#define FUSE_USE_VERSION 26
+
#include "fuse-dfs/test/fuse_workload.h"
#include "libhdfs/expect.h"
#include "util/posix_util.h"
@@ -23,6 +25,7 @@
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
+#include <fuse.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
@@ -138,13 +141,89 @@ static int safeRead(int fd, void *buf, i
return amt;
}
+/* Bug: HDFS-2551.
+ * When a program writes a file, closes it, and immediately re-opens it,
+ * it might not appear to have the correct length. This is because FUSE
+ * invokes the release() callback asynchronously.
+ *
+ * To work around this, we keep retrying until the file length is what we
+ * expect.
+ */
+static int closeWorkaroundHdfs2551(int fd, const char *path, off_t expectedSize)
+{
+ int ret, try;
+ struct stat stBuf;
+
+ RETRY_ON_EINTR_GET_ERRNO(ret, close(fd));
+ EXPECT_ZERO(ret);
+ for (try = 0; try < MAX_TRIES; try++) {
+ EXPECT_ZERO(stat(path, &stBuf));
+ EXPECT_NONZERO(S_ISREG(stBuf.st_mode));
+ if (stBuf.st_size == expectedSize) {
+ return 0;
+ }
+ sleepNoSig(1);
+ }
+ fprintf(stderr, "FUSE_WORKLOAD: error: expected file %s to have length "
+ "%lld; instead, it had length %lld\n",
+ path, (long long)expectedSize, (long long)stBuf.st_size);
+ return -EIO;
+}
+
+#ifdef FUSE_CAP_ATOMIC_O_TRUNC
+
+/**
+ * Test that we can create a file, write some contents to it, close that file,
+ * and then successfully re-open with O_TRUNC.
+ */
+static int testOpenTrunc(const char *base)
+{
+ int fd, err;
+ char path[PATH_MAX];
+ const char * const SAMPLE1 = "this is the first file that we wrote.";
+ const char * const SAMPLE2 = "this is the second file that we wrote. "
+ "It's #2!";
+
+ snprintf(path, sizeof(path), "%s/trunc.txt", base);
+ fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ if (fd < 0) {
+ err = errno;
+ fprintf(stderr, "TEST_ERROR: testOpenTrunc(%s): first open "
+ "failed with error %d\n", path, err);
+ return -err;
+ }
+ EXPECT_ZERO(safeWrite(fd, SAMPLE1, strlen(SAMPLE1)));
+ EXPECT_ZERO(closeWorkaroundHdfs2551(fd, path, strlen(SAMPLE1)));
+ fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ if (fd < 0) {
+ err = errno;
+ fprintf(stderr, "TEST_ERROR: testOpenTrunc(%s): second open "
+ "failed with error %d\n", path, err);
+ return -err;
+ }
+ EXPECT_ZERO(safeWrite(fd, SAMPLE2, strlen(SAMPLE2)));
+ EXPECT_ZERO(closeWorkaroundHdfs2551(fd, path, strlen(SAMPLE2)));
+ return 0;
+}
+
+#else
+
+static int testOpenTrunc(const char *base)
+{
+ fprintf(stderr, "FUSE_WORKLOAD: We lack FUSE_CAP_ATOMIC_O_TRUNC support. "
+ "Not testing open(O_TRUNC).\n");
+ return 0;
+}
+
+#endif
+
int runFuseWorkloadImpl(const char *root, const char *pcomp,
struct fileCtx *ctx)
{
char base[PATH_MAX], tmp[PATH_MAX], *tmpBuf;
char src[PATH_MAX], dst[PATH_MAX];
struct stat stBuf;
- int ret, i, try;
+ int ret, i;
struct utimbuf tbuf;
struct statvfs stvBuf;
@@ -241,35 +320,10 @@ int runFuseWorkloadImpl(const char *root
EXPECT_ZERO(safeWrite(ctx[i].fd, ctx[i].str, ctx[i].strLen));
}
for (i = 0; i < NUM_FILE_CTX; i++) {
- RETRY_ON_EINTR_GET_ERRNO(ret, close(ctx[i].fd));
- EXPECT_ZERO(ret);
+ EXPECT_ZERO(closeWorkaroundHdfs2551(ctx[i].fd, ctx[i].path, ctx[i].strLen));
ctx[i].fd = -1;
}
for (i = 0; i < NUM_FILE_CTX; i++) {
- /* Bug: HDFS-2551.
- * When a program writes a file, closes it, and immediately re-opens it,
- * it might not appear to have the correct length. This is because FUSE
- * invokes the release() callback asynchronously.
- *
- * To work around this, we keep retrying until the file length is what we
- * expect.
- */
- for (try = 0; try < MAX_TRIES; try++) {
- EXPECT_ZERO(stat(ctx[i].path, &stBuf));
- EXPECT_NONZERO(S_ISREG(stBuf.st_mode));
- if (ctx[i].strLen == stBuf.st_size) {
- break;
- }
- sleepNoSig(1);
- }
- if (try == MAX_TRIES) {
- fprintf(stderr, "FUSE_WORKLOAD: error: expected file %s to have length "
- "%d; instead, it had length %lld\n",
- ctx[i].path, ctx[i].strLen, (long long)stBuf.st_size);
- return -EIO;
- }
- }
- for (i = 0; i < NUM_FILE_CTX; i++) {
ctx[i].fd = open(ctx[i].path, O_RDONLY);
if (ctx[i].fd < 0) {
fprintf(stderr, "FUSE_WORKLOAD: Failed to open file %s for reading!\n",
@@ -308,6 +362,7 @@ int runFuseWorkloadImpl(const char *root
for (i = 0; i < NUM_FILE_CTX; i++) {
free(ctx[i].path);
}
+ EXPECT_ZERO(testOpenTrunc(base));
EXPECT_ZERO(recursiveDelete(base));
return 0;
}
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1419191-1426018
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1419191-1426018
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1419191-1426018
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1419191-1426018
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Fri Jan 11 19:40:23 2013
@@ -255,7 +255,22 @@ public class TestHDFSFileContextMainOper
Assert.assertFalse(fs.exists(src1)); // ensure src1 is already renamed
Assert.assertTrue(fs.exists(dst1)); // ensure rename dst exists
}
-
+
+ @Test
+ public void testIsValidNameInvalidNames() {
+ String[] invalidNames = {
+ "/foo/../bar",
+ "/foo/./bar",
+ "/foo/:/bar",
+ "/foo:bar"
+ };
+
+ for (String invalidName: invalidNames) {
+ Assert.assertFalse(invalidName + " should not be valid",
+ fc.getDefaultFileSystem().isValidName(invalidName));
+ }
+ }
+
private void oldRename(Path src, Path dst, boolean renameSucceeds,
boolean exception) throws Exception {
DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java Fri Jan 11 19:40:23 2013
@@ -51,7 +51,7 @@ public class TestVolumeId {
}
@SuppressWarnings("unchecked")
- private <T> void testEq(final boolean eq, Comparable<? super T> id1, Comparable<? super T> id2) {
+ private <T> void testEq(final boolean eq, Comparable<T> id1, Comparable<T> id2) {
final int h1 = id1.hashCode();
final int h2 = id2.hashCode();
@@ -99,8 +99,8 @@ public class TestVolumeId {
}
@SuppressWarnings("unchecked")
- private <T> void testEqMany(final boolean eq, Comparable<? super T>... volumeIds) {
- Comparable<? super T> vidNext;
+ private <T> void testEqMany(final boolean eq, Comparable<T>... volumeIds) {
+ Comparable<T> vidNext;
int sum = 0;
for (int i=0; i<volumeIds.length; i++) {
if (i == volumeIds.length - 1) {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Fri Jan 11 19:40:23 2013
@@ -624,8 +624,11 @@ public class TestDFSUtil {
@Test
public void testIsValidName() {
assertFalse(DFSUtil.isValidName("/foo/../bar"));
+ assertFalse(DFSUtil.isValidName("/foo/./bar"));
assertFalse(DFSUtil.isValidName("/foo//bar"));
assertTrue(DFSUtil.isValidName("/"));
assertTrue(DFSUtil.isValidName("/bar/"));
+ assertFalse(DFSUtil.isValidName("/foo/:/bar"));
+ assertFalse(DFSUtil.isValidName("/foo:bar"));
}
}
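
Taken together, the assertions above encode a simple rule: a valid HDFS name must be absolute, no component may be "." or ".." or contain ':', and "//" (an empty interior component) is rejected while a single trailing slash is fine. The sketch below is a minimal C illustration of that rule only; the function name is hypothetical, and the real DFSUtil.isValidName() (Java) may enforce additional constraints not exercised by these tests.

    #include <stdbool.h>
    #include <string.h>

    /* Illustrative only: encodes the naming rule the assertions above test. */
    static bool isValidHdfsName(const char *path)
    {
        const char *p, *end;
        size_t len;

        if (path[0] != '/')
            return false;                 /* must be absolute */
        if (strcmp(path, "/") == 0)
            return true;                  /* the root alone is valid */
        p = path + 1;
        for (;;) {
            end = strchr(p, '/');
            len = end ? (size_t)(end - p) : strlen(p);
            if (len == 0)
                return end == NULL;       /* trailing '/' ok; "//" is not */
            if ((len == 1 && p[0] == '.') ||
                (len == 2 && p[0] == '.' && p[1] == '.') ||
                memchr(p, ':', len) != NULL)
                return false;             /* ".", "..", or ':' in a component */
            if (end == NULL)
                return true;
            p = end + 1;
        }
    }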
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java Fri Jan 11 19:40:23 2013
@@ -60,6 +60,15 @@ public class TestLayoutVersion {
}
/**
+ * Test to make sure release 1.2.0 supports CONCAT
+ */
+ @Test
+ public void testRelease1_2_0() {
+ assertTrue(LayoutVersion.supports(Feature.CONCAT,
+ Feature.RESERVED_REL1_2_0.lv));
+ }
+
+ /**
* Given feature {@code f}, ensures the layout version of that feature
* supports all the features supported by its ancestor.
*/
@@ -69,7 +78,9 @@ public class TestLayoutVersion {
EnumSet<Feature> ancestorSet = LayoutVersion.map.get(ancestorLV);
assertNotNull(ancestorSet);
for (Feature feature : ancestorSet) {
- assertTrue(LayoutVersion.supports(feature, lv));
+ assertTrue("LV " + lv + " does nto support " + feature
+ + " supported by the ancestor LV " + f.ancestorLV,
+ LayoutVersion.supports(feature, lv));
}
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Fri Jan 11 19:40:23 2013
@@ -97,6 +97,7 @@ public class TestBlockRecovery {
MiniDFSCluster.getBaseDirectory() + "data";
private DataNode dn;
private Configuration conf;
+ private boolean tearDownDone;
private final static long RECOVERY_ID = 3000L;
private final static String CLUSTER_ID = "testClusterID";
private final static String POOL_ID = "BP-TEST";
@@ -121,6 +122,7 @@ public class TestBlockRecovery {
*/
@Before
public void startUp() throws IOException {
+ tearDownDone = false;
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
@@ -177,7 +179,7 @@ public class TestBlockRecovery {
*/
@After
public void tearDown() throws IOException {
- if (dn != null) {
+ if (!tearDownDone && dn != null) {
try {
dn.shutdown();
} catch(Exception e) {
@@ -188,6 +190,7 @@ public class TestBlockRecovery {
Assert.assertTrue(
"Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
}
+ tearDownDone = true;
}
}
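
The new tearDownDone flag makes tearDown() idempotent: once cleanup has run, a second invocation becomes a no-op, presumably so that a test which triggers shutdown itself does not pay for (or fail during) a repeated shutdown from the @After hook. The guard pattern in miniature, as a standalone C sketch; the names and the printf stand-in are illustrative, not the test's actual cleanup:

    #include <stdbool.h>
    #include <stdio.h>

    static bool tearDownDone = false;

    /* Runs real cleanup at most once, no matter how many times the
     * harness (or the test itself) calls it. */
    static void tearDown(void)
    {
        if (tearDownDone)
            return;
        printf("shutting down test fixture\n");  /* stand-in for dn.shutdown() etc. */
        tearDownDone = true;
    }

    int main(void)
    {
        tearDown();   /* explicit call at the end of a test */
        tearDown();   /* second call from the harness: now a no-op */
        return 0;
    }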
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java Fri Jan 11 19:40:23 2013
@@ -17,24 +17,14 @@
package org.apache.hadoop.hdfs.server.datanode;
-import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning;
import org.junit.Assume;
import org.junit.Before;
@@ -67,7 +57,7 @@ public class TestStartSecureDataNode {
}
@Test
- public void testSecureNameNode() throws IOException, InterruptedException {
+ public void testSecureNameNode() throws Exception {
MiniDFSCluster cluster = null;
try {
String nnPrincipal =
@@ -105,9 +95,9 @@ public class TestStartSecureDataNode {
.build();
cluster.waitActive();
assertTrue(cluster.isDataNodeUp());
-
} catch (Exception ex) {
ex.printStackTrace();
+ throw ex;
} finally {
if (cluster != null) {
cluster.shutdown();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Fri Jan 11 19:40:23 2013
@@ -1209,22 +1209,19 @@ public class TestEditLog {
*
*/
static void validateNoCrash(byte garbage[]) throws IOException {
- final String TEST_LOG_NAME = "test_edit_log";
+ final File TEST_LOG_NAME = new File(TEST_DIR, "test_edit_log");
EditLogFileOutputStream elfos = null;
- File file = null;
EditLogFileInputStream elfis = null;
try {
- file = new File(TEST_LOG_NAME);
- elfos = new EditLogFileOutputStream(file, 0);
+ elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
elfos.create();
elfos.writeRaw(garbage, 0, garbage.length);
elfos.setReadyToFlush();
elfos.flushAndSync(true);
elfos.close();
elfos = null;
- file = new File(TEST_LOG_NAME);
- elfis = new EditLogFileInputStream(file);
+ elfis = new EditLogFileInputStream(TEST_LOG_NAME);
// verify that we can read everything without killing the JVM or
// throwing an exception other than IOException
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Fri Jan 11 19:40:23 2013
@@ -59,6 +59,8 @@ import com.google.common.collect.Sets;
public class TestNameNodeRecovery {
private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
private static StartupOption recoverStartOpt = StartupOption.RECOVER;
+ private static final File TEST_DIR = new File(
+ System.getProperty("test.build.data","build/test/data"));
static {
recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
@@ -66,15 +68,13 @@ public class TestNameNodeRecovery {
}
static void runEditLogTest(EditLogTestSetup elts) throws IOException {
- final String TEST_LOG_NAME = "test_edit_log";
+ final File TEST_LOG_NAME = new File(TEST_DIR, "test_edit_log");
final OpInstanceCache cache = new OpInstanceCache();
EditLogFileOutputStream elfos = null;
- File file = null;
EditLogFileInputStream elfis = null;
try {
- file = new File(TEST_LOG_NAME);
- elfos = new EditLogFileOutputStream(file, 0);
+ elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
elfos.create();
elts.addTransactionsToLog(elfos, cache);
@@ -82,8 +82,7 @@ public class TestNameNodeRecovery {
elfos.flushAndSync(true);
elfos.close();
elfos = null;
- file = new File(TEST_LOG_NAME);
- elfis = new EditLogFileInputStream(file);
+ elfis = new EditLogFileInputStream(TEST_LOG_NAME);
// reading through normally will get you an exception
Set<Long> validTxIds = elts.getValidTxIds();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java?rev=1432246&r1=1432245&r2=1432246&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java Fri Jan 11 19:40:23 2013
@@ -56,7 +56,6 @@ public class TestWebHdfsWithMultipleName
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
- ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
}
private static final Configuration conf = new HdfsConfiguration();