Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/08 00:53:59 UTC
svn commit: r1310905 - in
/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/protocolPB/
src/main/java/org/apache/hadoop/hdfs/server/j...
Author: todd
Date: Sat Apr 7 22:53:55 2012
New Revision: 1310905
URL: http://svn.apache.org/viewvc?rev=1310905&view=rev
Log:
Merge trunk into auto-failover branch
Added:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FenceResponse.java
- copied unchanged from r1310901, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FenceResponse.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FencedException.java
- copied unchanged from r1310901, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FencedException.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java
- copied unchanged from r1310901, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
- copied unchanged from r1310901, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.c
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.h
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs_test.c
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1310174-1310901
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sat Apr 7 22:53:55 2012
@@ -62,14 +62,14 @@ Trunk (unreleased changes)
HDFS-3178. Add states and state handler for journal synchronization in
JournalService. (szetszwo)
- HDFS-3204. Minor modification to JournalProtocol.proto to make
- it generic. (suresh)
-
OPTIMIZATIONS
HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
(Henry Robinson via todd)
+ HDFS-3110. Use directRead API to reduce the number of buffer copies in
+ libhdfs (Henry Robinson via todd)
+
BUG FIXES
HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
@@ -114,6 +114,9 @@ Trunk (unreleased changes)
HDFS-3126. Journal stream from Namenode to BackupNode needs to have
timeout. (Hari Mankude via suresh)
+
+ HDFS-3121. Add HDFS tests for HADOOP-8014 change. (John George via
+ suresh)
Release 2.0.0 - UNRELEASED
@@ -327,6 +330,17 @@ Release 2.0.0 - UNRELEASED
HDFS-3050. rework OEV to share more code with the NameNode.
(Colin Patrick McCabe via eli)
+ HDFS-3226. Allow GetConf tool to print arbitrary keys (todd)
+
+ HDFS-3204. Minor modification to JournalProtocol.proto to make
+ it generic. (suresh)
+
+ HDFS-2505. Add a test to verify getFileChecksum(..) with ViewFS. (Ravi
+ Prakash via szetszwo)
+
+ HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
+ and epoch in JournalProtocol. (suresh via szetszwo)
+
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -436,6 +450,9 @@ Release 2.0.0 - UNRELEASED
HDFS-3208. Bogus entries in hosts files are incorrectly displayed
in the report. (eli)
+ HDFS-3136. Remove SLF4J dependency as HDFS does not need it to fix
+ unnecessary warnings. (Jason Lowe via suresh)
+
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/pom.xml Sat Apr 7 22:53:55 2012
@@ -91,16 +91,6 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- <scope>compile</scope>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-log4j12</artifactId>
- <scope>compile</scope>
- </dependency>
- <dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1310174-1310901
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java Sat Apr 7 22:53:55 2012
@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
/**
@@ -33,6 +34,10 @@ import org.apache.hadoop.hdfs.server.pro
public class UnregisteredNodeException extends IOException {
private static final long serialVersionUID = -5620209396945970810L;
+ public UnregisteredNodeException(JournalInfo info) {
+ super("Unregistered server: " + info.toString());
+ }
+
public UnregisteredNodeException(NodeRegistration nodeReg) {
super("Unregistered server: " + nodeReg.toString());
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java Sat Apr 7 22:53:55 2012
@@ -20,10 +20,13 @@ package org.apache.hadoop.hdfs.protocolP
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto;
+import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import com.google.protobuf.RpcController;
@@ -48,9 +51,8 @@ public class JournalProtocolServerSideTr
public JournalResponseProto journal(RpcController unused,
JournalRequestProto req) throws ServiceException {
try {
- impl.journal(PBHelper.convert(req.getJournalInfo()),
- req.getFirstTxnId(), req.getNumTxns(), req.getRecords()
- .toByteArray());
+ impl.journal(PBHelper.convert(req.getJournalInfo()), req.getEpoch(),
+ req.getFirstTxnId(), req.getNumTxns(), req.getRecords().toByteArray());
} catch (IOException e) {
throw new ServiceException(e);
}
@@ -63,10 +65,24 @@ public class JournalProtocolServerSideTr
StartLogSegmentRequestProto req) throws ServiceException {
try {
impl.startLogSegment(PBHelper.convert(req.getJournalInfo()),
- req.getTxid());
+ req.getEpoch(), req.getTxid());
} catch (IOException e) {
throw new ServiceException(e);
}
return StartLogSegmentResponseProto.newBuilder().build();
}
+
+ @Override
+ public FenceResponseProto fence(RpcController controller,
+ FenceRequestProto req) throws ServiceException {
+ try {
+ FenceResponse resp = impl.fence(PBHelper.convert(req.getJournalInfo()), req.getEpoch(),
+ req.getFencerInfo());
+ return FenceResponseProto.newBuilder().setInSync(resp.isInSync())
+ .setLastTransactionId(resp.getLastTransactionId())
+ .setPreviousEpoch(resp.getPreviousEpoch()).build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java Sat Apr 7 22:53:55 2012
@@ -22,10 +22,13 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
+import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
@@ -58,10 +61,11 @@ public class JournalProtocolTranslatorPB
}
@Override
- public void journal(NamenodeRegistration reg, long firstTxnId,
+ public void journal(JournalInfo journalInfo, long epoch, long firstTxnId,
int numTxns, byte[] records) throws IOException {
JournalRequestProto req = JournalRequestProto.newBuilder()
- .setJournalInfo(PBHelper.convertToJournalInfo(reg))
+ .setJournalInfo(PBHelper.convert(journalInfo))
+ .setEpoch(epoch)
.setFirstTxnId(firstTxnId)
.setNumTxns(numTxns)
.setRecords(PBHelper.getByteString(records))
@@ -74,10 +78,11 @@ public class JournalProtocolTranslatorPB
}
@Override
- public void startLogSegment(NamenodeRegistration registration, long txid)
+ public void startLogSegment(JournalInfo journalInfo, long epoch, long txid)
throws IOException {
StartLogSegmentRequestProto req = StartLogSegmentRequestProto.newBuilder()
- .setJournalInfo(PBHelper.convertToJournalInfo(registration))
+ .setJournalInfo(PBHelper.convert(journalInfo))
+ .setEpoch(epoch)
.setTxid(txid)
.build();
try {
@@ -86,6 +91,20 @@ public class JournalProtocolTranslatorPB
throw ProtobufHelper.getRemoteException(e);
}
}
+
+ @Override
+ public FenceResponse fence(JournalInfo journalInfo, long epoch,
+ String fencerInfo) throws IOException {
+ FenceRequestProto req = FenceRequestProto.newBuilder().setEpoch(epoch)
+ .setJournalInfo(PBHelper.convert(journalInfo)).build();
+ try {
+ FenceResponseProto resp = rpcProxy.fence(NULL_CONTROLLER, req);
+ return new FenceResponse(resp.getPreviousEpoch(),
+ resp.getLastTransactionId(), resp.getInSync());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
@Override
public boolean isMethodSupported(String methodName) throws IOException {
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Sat Apr 7 22:53:55 2012
@@ -110,6 +110,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -117,6 +118,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -127,7 +129,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
@@ -1347,25 +1348,19 @@ public class PBHelper {
.setStorageID(r.getStorageID()).build();
}
- public static NamenodeRegistration convert(JournalInfoProto info) {
+ public static JournalInfo convert(JournalInfoProto info) {
int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
- StorageInfo storage = new StorageInfo(lv, nsID, info.getClusterID(), 0);
-
- // Note that the role is always {@link NamenodeRole#NAMENODE} as this
- // conversion happens for messages from Namenode to Journal receivers.
- // Addresses in the registration are unused.
- return new NamenodeRegistration("", "", storage, NamenodeRole.NAMENODE);
+ return new JournalInfo(lv, info.getClusterID(), nsID);
}
/**
* Method used for converting {@link JournalInfoProto} sent from Namenode
* to Journal receivers to {@link NamenodeRegistration}.
*/
- public static JournalInfoProto convertToJournalInfo(
- NamenodeRegistration reg) {
- return JournalInfoProto.newBuilder().setClusterID(reg.getClusterID())
- .setLayoutVersion(reg.getLayoutVersion())
- .setNamespaceID(reg.getNamespaceID()).build();
+ public static JournalInfoProto convert(JournalInfo j) {
+ return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
+ .setLayoutVersion(j.getLayoutVersion())
+ .setNamespaceID(j.getNamespaceId()).build();
}
}
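The two convert(..) overloads above replace convertToJournalInfo(..) and make the protobuf mapping symmetric. A minimal round-trip sketch (hypothetical values; JournalInfoProto is the message generated from JournalProtocol.proto):

    JournalInfo info = new JournalInfo(layoutVersion, "test-cluster", namespaceId);
    JournalInfoProto proto = PBHelper.convert(info);   // JournalInfo -> proto
    JournalInfo back = PBHelper.convert(proto);        // proto -> JournalInfo
    // layoutVersion, clusterId and namespaceId survive the round trip;
    // optional fields missing on the wire default to 0 when converting back.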
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/journalservice/JournalService.java Sat Apr 7 22:53:55 2012
@@ -31,6 +31,9 @@ import org.apache.hadoop.hdfs.protocolPB
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
+import org.apache.hadoop.hdfs.server.protocol.FencedException;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -40,6 +43,7 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
+import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
/**
@@ -66,6 +70,8 @@ public class JournalService implements J
private final NamenodeProtocol namenode;
private final StateHandler stateHandler = new StateHandler();
private final RPC.Server rpcServer;
+ private long epoch = 0;
+ private String fencerInfo;
enum State {
/** The service is initialized and ready to start. */
@@ -115,7 +121,7 @@ public class JournalService implements J
current = State.WAITING_FOR_ROLL;
}
- synchronized void startLogSegment() throws IOException {
+ synchronized void startLogSegment() {
if (current == State.WAITING_FOR_ROLL) {
current = State.SYNCING;
}
@@ -232,28 +238,42 @@ public class JournalService implements J
}
@Override
- public void journal(NamenodeRegistration registration, long firstTxnId,
+ public void journal(JournalInfo journalInfo, long epoch, long firstTxnId,
int numTxns, byte[] records) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Received journal " + firstTxnId + " " + numTxns);
}
stateHandler.isJournalAllowed();
- verify(registration);
+ verify(epoch, journalInfo);
listener.journal(this, firstTxnId, numTxns, records);
}
@Override
- public void startLogSegment(NamenodeRegistration registration, long txid)
+ public void startLogSegment(JournalInfo journalInfo, long epoch, long txid)
throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Received startLogSegment " + txid);
}
stateHandler.isStartLogSegmentAllowed();
- verify(registration);
+ verify(epoch, journalInfo);
listener.rollLogs(this, txid);
stateHandler.startLogSegment();
}
+ @Override
+ public FenceResponse fence(JournalInfo journalInfo, long epoch,
+ String fencerInfo) throws IOException {
+ LOG.info("Fenced by " + fencerInfo + " with epoch " + epoch);
+ verifyFence(epoch, fencerInfo);
+ verify(journalInfo);
+ long previousEpoch = this.epoch; // the epoch being replaced, not the new one
+ this.epoch = epoch;
+ this.fencerInfo = fencerInfo;
+
+ // TODO:HDFS-3092 set lastTransId and inSync
+ return new FenceResponse(previousEpoch, 0, false);
+ }
+
/** Create an RPC server. */
private static RPC.Server createRpcServer(Configuration conf,
InetSocketAddress address, JournalProtocol impl) throws IOException {
@@ -267,15 +287,54 @@ public class JournalService implements J
address.getHostName(), address.getPort(), 1, false, conf, null);
}
- private void verify(NamenodeRegistration reg) throws IOException {
- if (!registration.getRegistrationID().equals(reg.getRegistrationID())) {
- LOG.warn("Invalid registrationID - expected: "
- + registration.getRegistrationID() + " received: "
- + reg.getRegistrationID());
- throw new UnregisteredNodeException(reg);
+ private void verifyEpoch(long e) throws FencedException {
+ if (epoch != e) {
+ String errorMsg = "Epoch " + e + " is not valid. "
+ + "Resource has already been fenced by " + fencerInfo
+ + " with epoch " + epoch;
+ LOG.warn(errorMsg);
+ throw new FencedException(errorMsg);
+ }
+ }
+
+ private void verifyFence(long e, String fencer) throws FencedException {
+ if (e <= epoch) {
+ String errorMsg = "Epoch " + e + " from fencer " + fencer
+ + " is not valid. " + "Resource has already been fenced by "
+ + fencerInfo + " with epoch " + epoch;
+ LOG.warn(errorMsg);
+ throw new FencedException(errorMsg);
+ }
+ }
+
+ /**
+ * Verifies a journal request
+ */
+ private void verify(JournalInfo journalInfo) throws IOException {
+ String errorMsg = null;
+ int expectedNamespaceID = registration.getNamespaceID();
+ if (journalInfo.getNamespaceId() != expectedNamespaceID) {
+ errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
+ + " actual " + journalInfo.getNamespaceId();
+ LOG.warn(errorMsg);
+ throw new UnregisteredNodeException(journalInfo);
+ }
+ if (!journalInfo.getClusterId().equals(registration.getClusterID())) {
+ errorMsg = "Invalid clusterId in journal request - expected "
+ + journalInfo.getClusterId() + " actual " + registration.getClusterID();
+ LOG.warn(errorMsg);
+ throw new UnregisteredNodeException(journalInfo);
}
}
+ /**
+ * Verifies a journal request
+ */
+ private void verify(long e, JournalInfo journalInfo) throws IOException {
+ verifyEpoch(e);
+ verify(journalInfo);
+ }
+
/**
* Register this service with the active namenode.
*/
@@ -298,4 +357,9 @@ public class JournalService implements J
listener.verifyVersion(this, nsInfo);
registration.setStorageInfo(nsInfo);
}
-}
\ No newline at end of file
+
+ @VisibleForTesting
+ long getEpoch() {
+ return epoch;
+ }
+}
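The fencing checks above boil down to a strict ordering on epochs: fence(..) succeeds only with an epoch strictly greater than the current one, while journal(..) and startLogSegment(..) require an epoch exactly equal to it. A sketch of the expected outcomes, assuming a JournalService currently at epoch 5:

    // fence(info, 5, "nn-a")    -> FencedException (epoch must be > 5)
    // fence(info, 6, "nn-a")    -> accepted; epoch becomes 6 and the
    //                              FenceResponse carries previousEpoch 5
    // journal(info, 5, ...)     -> FencedException (stale writer is fenced)
    // journal(info, 6, ...)     -> accepted (epoch matches)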
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java Sat Apr 7 22:53:55 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.IOException;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
/**
@@ -26,19 +27,20 @@ import org.apache.hadoop.hdfs.server.pro
* to a BackupNode.
*/
class BackupJournalManager implements JournalManager {
-
- private final NamenodeRegistration nnReg;
private final NamenodeRegistration bnReg;
+ private final JournalInfo journalInfo;
BackupJournalManager(NamenodeRegistration bnReg,
NamenodeRegistration nnReg) {
+ journalInfo = new JournalInfo(nnReg.getLayoutVersion(),
+ nnReg.getClusterID(), nnReg.getNamespaceID());
this.bnReg = bnReg;
- this.nnReg = nnReg;
}
@Override
public EditLogOutputStream startLogSegment(long txId) throws IOException {
- EditLogBackupOutputStream stm = new EditLogBackupOutputStream(bnReg, nnReg);
+ EditLogBackupOutputStream stm = new EditLogBackupOutputStream(bnReg,
+ journalInfo);
stm.startLogSegment(txId);
return stm;
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Sat Apr 7 22:53:55 2012
@@ -35,6 +35,8 @@ import org.apache.hadoop.hdfs.protocolPB
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -217,7 +219,8 @@ public class BackupNode extends NameNode
}
/* @Override */// NameNode
- public boolean setSafeMode(SafeModeAction action) throws IOException {
+ public boolean setSafeMode(@SuppressWarnings("unused") SafeModeAction action)
+ throws IOException {
throw new UnsupportedActionException("setSafeMode");
}
@@ -236,51 +239,56 @@ public class BackupNode extends NameNode
/**
* Verifies a journal request
- * @param nodeReg node registration
- * @throws UnregisteredNodeException if the registration is invalid
*/
- void verifyJournalRequest(NamenodeRegistration reg) throws IOException {
- verifyVersion(reg.getLayoutVersion());
+ private void verifyJournalRequest(JournalInfo journalInfo)
+ throws IOException {
+ verifyVersion(journalInfo.getLayoutVersion());
String errorMsg = null;
int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
- if (reg.getNamespaceID() != expectedNamespaceID) {
+ if (journalInfo.getNamespaceId() != expectedNamespaceID) {
errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
- + " actual " + reg.getNamespaceID();
+ + " actual " + journalInfo.getNamespaceId();
LOG.warn(errorMsg);
- throw new UnregisteredNodeException(reg);
+ throw new UnregisteredNodeException(journalInfo);
}
- if (!reg.getClusterID().equals(namesystem.getClusterId())) {
+ if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
errorMsg = "Invalid clusterId in journal request - expected "
- + reg.getClusterID() + " actual " + namesystem.getClusterId();
+ + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
LOG.warn(errorMsg);
- throw new UnregisteredNodeException(reg);
+ throw new UnregisteredNodeException(journalInfo);
}
}
-
/////////////////////////////////////////////////////
// BackupNodeProtocol implementation for backup node.
/////////////////////////////////////////////////////
@Override
- public void startLogSegment(NamenodeRegistration registration, long txid)
- throws IOException {
+ public void startLogSegment(JournalInfo journalInfo, long epoch,
+ long txid) throws IOException {
namesystem.checkOperation(OperationCategory.JOURNAL);
- verifyJournalRequest(registration);
+ verifyJournalRequest(journalInfo);
getBNImage().namenodeStartedLogSegment(txid);
}
@Override
- public void journal(NamenodeRegistration nnReg,
- long firstTxId, int numTxns,
- byte[] records) throws IOException {
+ public void journal(JournalInfo journalInfo, long epoch, long firstTxId,
+ int numTxns, byte[] records) throws IOException {
namesystem.checkOperation(OperationCategory.JOURNAL);
- verifyJournalRequest(nnReg);
+ verifyJournalRequest(journalInfo);
getBNImage().journal(firstTxId, numTxns, records);
}
private BackupImage getBNImage() {
return (BackupImage)nn.getFSImage();
}
+
+ @Override
+ public FenceResponse fence(JournalInfo journalInfo, long epoch,
+ String fencerInfo) throws IOException {
+ LOG.info("Fenced by " + fencerInfo + " with epoch " + epoch);
+ throw new UnsupportedOperationException(
+ "BackupNode does not support fence");
+ }
}
//////////////////////////////////////////////////////
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Sat Apr 7 22:53:55 2012
@@ -24,6 +24,7 @@ import java.util.Arrays;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.io.DataOutputBuffer;
@@ -42,18 +43,18 @@ import org.apache.hadoop.security.UserGr
class EditLogBackupOutputStream extends EditLogOutputStream {
static int DEFAULT_BUFFER_SIZE = 256;
- private JournalProtocol backupNode; // RPC proxy to backup node
- private NamenodeRegistration bnRegistration; // backup node registration
- private NamenodeRegistration nnRegistration; // active node registration
+ private final JournalProtocol backupNode; // RPC proxy to backup node
+ private final NamenodeRegistration bnRegistration; // backup node registration
+ private final JournalInfo journalInfo; // journal info of the active namenode
+ private final DataOutputBuffer out; // serialized output sent to backup node
private EditsDoubleBuffer doubleBuf;
- private DataOutputBuffer out; // serialized output sent to backup node
EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
- NamenodeRegistration nnReg) // active name-node
+ JournalInfo journalInfo) // active name-node
throws IOException {
super();
this.bnRegistration = bnReg;
- this.nnRegistration = nnReg;
+ this.journalInfo = journalInfo;
InetSocketAddress bnAddress =
NetUtils.createSocketAddr(bnRegistration.getAddress());
try {
@@ -127,8 +128,7 @@ class EditLogBackupOutputStream extends
out.reset();
assert out.getLength() == 0 : "Output buffer is not empty";
- backupNode.journal(nnRegistration,
- firstTxToFlush, numReadyTxns, data);
+ backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data);
}
}
@@ -140,6 +140,6 @@ class EditLogBackupOutputStream extends
}
void startLogSegment(long txId) throws IOException {
- backupNode.startLogSegment(nnRegistration, txId);
+ backupNode.startLogSegment(journalInfo, 0, txId);
}
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java Sat Apr 7 22:53:55 2012
@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
/**
@@ -53,12 +52,15 @@ public interface JournalProtocol {
* via {@code EditLogBackupOutputStream} in order to synchronize meta-data
* changes with the backup namespace image.
*
- * @param registration active node registration
+ * @param journalInfo journal information
+ * @param epoch marks the beginning of a new journal writer
* @param firstTxnId the first transaction of this batch
* @param numTxns number of transactions
* @param records byte array containing serialized journal records
+ * @throws FencedException if the resource has been fenced
*/
- public void journal(NamenodeRegistration registration,
+ public void journal(JournalInfo journalInfo,
+ long epoch,
long firstTxnId,
int numTxns,
byte[] records) throws IOException;
@@ -66,9 +68,24 @@ public interface JournalProtocol {
/**
* Notify the BackupNode that the NameNode has rolled its edit logs
* and is now writing a new log segment.
- * @param registration the registration of the active NameNode
+ * @param journalInfo journal information
+ * @param epoch marks the beginning of a new journal writer
* @param txid the first txid in the new log
+ * @throws FencedException if the resource has been fenced
*/
- public void startLogSegment(NamenodeRegistration registration,
+ public void startLogSegment(JournalInfo journalInfo, long epoch,
long txid) throws IOException;
+
+ /**
+ * Request to fence any other journal writers.
+ * Older writers at a previous epoch will be fenced and can no longer
+ * perform journal operations.
+ *
+ * @param journalInfo journal information
+ * @param epoch marks the beginning of a new journal writer
+ * @param fencerInfo info about fencer for debugging purposes
+ * @throws FencedException if the resource has been fenced
+ */
+ public FenceResponse fence(JournalInfo journalInfo, long epoch,
+ String fencerInfo) throws IOException;
}
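Together the three methods define a simple writer-takeover protocol: a prospective writer fences with a higher epoch, learns how far the journal got, and then writes under that epoch. A client-side sketch of that flow (proxy, epochs and txids are hypothetical):

    JournalInfo info = new JournalInfo(layoutVersion, clusterId, namespaceId);
    long myEpoch = lastSeenEpoch + 1;            // must exceed the current epoch
    FenceResponse resp = proxy.fence(info, myEpoch, "fencer@nn-host");
    if (!resp.isInSync()) {
      // resp.getLastTransactionId() says where the journal left off,
      // i.e. what still has to be synchronized before going active.
    }
    proxy.startLogSegment(info, myEpoch, nextTxId);
    proxy.journal(info, myEpoch, nextTxId, numTxns, records);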
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Sat Apr 7 22:53:55 2012
@@ -21,10 +21,12 @@ import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -70,7 +72,8 @@ public class GetConf extends Configured
EXCLUDE_FILE("-excludeFile",
"gets the exclude file path that defines the datanodes " +
"that need to decommissioned."),
- NNRPCADDRESSES("-nnRpcAddresses", "gets the namenode rpc addresses");
+ NNRPCADDRESSES("-nnRpcAddresses", "gets the namenode rpc addresses"),
+ CONFKEY("-confKey [key]", "gets a specific key from the configuration");
private static Map<String, CommandHandler> map;
static {
@@ -87,6 +90,8 @@ public class GetConf extends Configured
new CommandHandler("DFSConfigKeys.DFS_HOSTS_EXCLUDE"));
map.put(NNRPCADDRESSES.getName().toLowerCase(),
new NNRpcAddressesCommandHandler());
+ map.put(CONFKEY.getName().toLowerCase(),
+ new PrintConfKeyCommandHandler());
}
private final String cmd;
@@ -98,6 +103,10 @@ public class GetConf extends Configured
}
public String getName() {
+ return cmd.split(" ")[0];
+ }
+
+ public String getUsage() {
return cmd;
}
@@ -105,8 +114,8 @@ public class GetConf extends Configured
return description;
}
- public static CommandHandler getHandler(String name) {
- return map.get(name.toLowerCase());
+ public static CommandHandler getHandler(String cmd) {
+ return map.get(cmd.toLowerCase());
}
}
@@ -118,7 +127,7 @@ public class GetConf extends Configured
StringBuilder usage = new StringBuilder(DESCRIPTION);
usage.append("\nhadoop getconf \n");
for (Command cmd : Command.values()) {
- usage.append("\t[" + cmd.getName() + "]\t\t\t" + cmd.getDescription()
+ usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
+ "\n");
}
USAGE = usage.toString();
@@ -128,7 +137,7 @@ public class GetConf extends Configured
* Handler to return value for key corresponding to the {@link Command}
*/
static class CommandHandler {
- final String key; // Configuration key to lookup
+ String key; // Configuration key to lookup
CommandHandler() {
this(null);
@@ -138,18 +147,30 @@ public class GetConf extends Configured
this.key = key;
}
- final int doWork(GetConf tool) {
+ final int doWork(GetConf tool, String[] args) {
try {
- return doWorkInternal(tool);
+ checkArgs(args);
+
+ return doWorkInternal(tool, args);
} catch (Exception e) {
tool.printError(e.getMessage());
}
return -1;
}
+
+ protected void checkArgs(String args[]) {
+ if (args.length > 0) {
+ throw new HadoopIllegalArgumentException(
+ "Did not expect argument: " + args[0]);
+ }
+ }
+
- /** Method to be overridden by sub classes for specific behavior */
- int doWorkInternal(GetConf tool) throws Exception {
- String value = tool.getConf().get(key);
+ /** Method to be overridden by sub classes for specific behavior
+ * @param args */
+ int doWorkInternal(GetConf tool, String[] args) throws Exception {
+
+ String value = tool.getConf().getTrimmed(key);
if (value != null) {
tool.printOut(value);
return 0;
@@ -164,7 +185,7 @@ public class GetConf extends Configured
*/
static class NameNodesCommandHandler extends CommandHandler {
@Override
- int doWorkInternal(GetConf tool) throws IOException {
+ int doWorkInternal(GetConf tool, String []args) throws IOException {
tool.printMap(DFSUtil.getNNServiceRpcAddresses(tool.getConf()));
return 0;
}
@@ -175,7 +196,7 @@ public class GetConf extends Configured
*/
static class BackupNodesCommandHandler extends CommandHandler {
@Override
- public int doWorkInternal(GetConf tool) throws IOException {
+ public int doWorkInternal(GetConf tool, String []args) throws IOException {
tool.printMap(DFSUtil.getBackupNodeAddresses(tool.getConf()));
return 0;
}
@@ -186,7 +207,7 @@ public class GetConf extends Configured
*/
static class SecondaryNameNodesCommandHandler extends CommandHandler {
@Override
- public int doWorkInternal(GetConf tool) throws IOException {
+ public int doWorkInternal(GetConf tool, String []args) throws IOException {
tool.printMap(DFSUtil.getSecondaryNameNodeAddresses(tool.getConf()));
return 0;
}
@@ -199,7 +220,7 @@ public class GetConf extends Configured
*/
static class NNRpcAddressesCommandHandler extends CommandHandler {
@Override
- public int doWorkInternal(GetConf tool) throws IOException {
+ public int doWorkInternal(GetConf tool, String []args) throws IOException {
Configuration config = tool.getConf();
List<ConfiguredNNAddress> cnnlist = DFSUtil.flattenAddressMap(
DFSUtil.getNNServiceRpcAddresses(config));
@@ -215,6 +236,23 @@ public class GetConf extends Configured
}
}
+ static class PrintConfKeyCommandHandler extends CommandHandler {
+ @Override
+ protected void checkArgs(String[] args) {
+ if (args.length != 1) {
+ throw new HadoopIllegalArgumentException(
+ "usage: " + Command.CONFKEY.getUsage());
+ }
+ }
+
+ @Override
+ int doWorkInternal(GetConf tool, String[] args) throws Exception {
+ this.key = args[0];
+ System.err.println("key: " + key);
+ return super.doWorkInternal(tool, args);
+ }
+ }
+
private final PrintStream out; // Stream for printing command output
private final PrintStream err; // Stream for printing error
@@ -260,10 +298,11 @@ public class GetConf extends Configured
* @return return status of the command
*/
private int doWork(String[] args) {
- if (args.length == 1) {
+ if (args.length >= 1) {
CommandHandler handler = Command.getHandler(args[0]);
if (handler != null) {
- return handler.doWork(this);
+ return handler.doWork(this,
+ Arrays.copyOfRange(args, 1, args.length));
}
}
printUsage();
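With the CONFKEY handler wired in, arbitrary keys can now be queried from the command line, for example (output depends on the local configuration; 3 is the stock default for dfs.replication):

    $ hadoop getconf -confKey dfs.replication
    3

Calling it without exactly one key makes checkArgs(..) throw, and the error printed is the usage string "usage: -confKey [key]".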
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1310174-1310901
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.c?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.c (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.c Sat Apr 7 22:53:55 2012
@@ -123,6 +123,11 @@ static int errnoFromException(jthrowable
goto done;
}
+ if (!strcmp(excClass, "java.lang.UnsupportedOperationException")) {
+ errnum = ENOTSUP;
+ goto done;
+ }
+
if (!strcmp(excClass, "org.apache.hadoop.security."
"AccessControlException")) {
errnum = EACCES;
@@ -614,8 +619,29 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const c
} else {
file->file = (*env)->NewGlobalRef(env, jVal.l);
file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
+ file->flags = 0;
destroyLocalReference(env, jVal.l);
+
+ if ((flags & O_WRONLY) == 0) {
+ // Try a test read to see if we can do direct reads
+ errno = 0;
+ char buf;
+ if (readDirect(fs, file, &buf, 0) == 0) {
+ // Success - 0-byte read should return 0
+ file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
+ } else {
+ if (errno != ENOTSUP) {
+ // Unexpected error. Clear it, don't set the direct flag.
+ fprintf(stderr,
+ "WARN: Unexpected error %d when testing "
+ "for direct read compatibility\n", errno);
+ errno = 0;
+ goto done;
+ }
+ }
+ errno = 0;
+ }
}
done:
@@ -706,10 +732,57 @@ int hdfsExists(hdfsFS fs, const char *pa
return jVal.z ? 0 : -1;
}
+// Checks input file for readiness for reading.
+static int readPrepare(JNIEnv* env, hdfsFS fs, hdfsFile f,
+ jobject* jInputStream)
+{
+ *jInputStream = (jobject)(f ? f->file : NULL);
+ //Sanity check
+ if (!f || f->type == UNINITIALIZED) {
+ errno = EBADF;
+ return -1;
+ }
+
+ //Error checking... make sure that this file is 'readable'
+ if (f->type != INPUT) {
+ fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+ errno = EINVAL;
+ return -1;
+ }
+
+ return 0;
+}
+
+// Common error-handling code between read paths.
+static int handleReadResult(int success, jvalue jVal, jthrowable jExc,
+ JNIEnv* env)
+{
+ int noReadBytes;
+ if (success != 0) {
+ if ((*env)->ExceptionCheck(env)) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FSDataInputStream::read");
+ }
+ noReadBytes = -1;
+ } else {
+ noReadBytes = jVal.i;
+ if (noReadBytes < 0) {
+ // -1 from Java is EOF, which is 0 here
+ noReadBytes = 0;
+ }
+ errno = 0;
+ }
+
+ return noReadBytes;
+}
tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
{
+ if (f->flags & HDFS_FILE_SUPPORTS_DIRECT_READ) {
+ return readDirect(fs, f, buffer, length);
+ }
+
// JAVA EQUIVALENT:
// byte [] bR = new byte[length];
// fis.read(bR);
@@ -722,49 +795,75 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, vo
}
//Parameters
- jobject jInputStream = (jobject)(f ? f->file : NULL);
+ jobject jInputStream;
+ if (readPrepare(env, fs, f, &jInputStream) == -1) {
+ return -1;
+ }
jbyteArray jbRarray;
jint noReadBytes = 0;
jvalue jVal;
jthrowable jExc = NULL;
- //Sanity check
- if (!f || f->type == UNINITIALIZED) {
- errno = EBADF;
- return -1;
+ //Read the requisite bytes
+ jbRarray = (*env)->NewByteArray(env, length);
+
+ int success = invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
+ "read", "([B)I", jbRarray);
+
+ noReadBytes = handleReadResult(success, jVal, jExc, env);
+
+ if (noReadBytes > 0) {
+ (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
}
- //Error checking... make sure that this file is 'readable'
- if (f->type != INPUT) {
- fprintf(stderr, "Cannot read from a non-InputStream object!\n");
- errno = EINVAL;
- return -1;
+ destroyLocalReference(env, jbRarray);
+
+ return noReadBytes;
+}
+
+// Reads using the read(ByteBuffer) API, which does fewer copies
+tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
+{
+ // JAVA EQUIVALENT:
+ // ByteBuffer bbuffer = ByteBuffer.allocateDirect(length) // wraps C buffer
+ // fis.read(bbuffer);
+
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
}
- //Read the requisite bytes
- jbRarray = (*env)->NewByteArray(env, length);
- if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
- "read", "([B)I", jbRarray) != 0) {
- errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
- "FSDataInputStream::read");
- noReadBytes = -1;
+ jobject jInputStream;
+ if (readPrepare(env, fs, f, &jInputStream) == -1) {
+ return -1;
}
- else {
- noReadBytes = jVal.i;
- if (noReadBytes > 0) {
- (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
- } else {
- //This is a valid case: there aren't any bytes left to read!
- if (noReadBytes == 0 || noReadBytes < -1) {
- fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
- }
- noReadBytes = 0;
- }
- errno = 0;
+
+ jint noReadBytes = 0;
+ jvalue jVal;
+ jthrowable jExc = NULL;
+
+ //Read the requisite bytes
+ jobject bb = (*env)->NewDirectByteBuffer(env, buffer, length);
+ if (bb == NULL) {
+ fprintf(stderr, "Could not allocate ByteBuffer");
+ if ((*env)->ExceptionCheck(env)) {
+ errno = errnoFromException(NULL, env, "JNIEnv::NewDirectByteBuffer");
+ } else {
+ errno = ENOMEM; // Best guess if there's no exception waiting
+ }
+ return -1;
}
- destroyLocalReference(env, jbRarray);
+ int success = invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
+ HADOOP_ISTRM, "read", "(Ljava/nio/ByteBuffer;)I",
+ bb);
+
+ noReadBytes = handleReadResult(success, jVal, jExc, env);
+
+ destroyLocalReference(env, bb);
return noReadBytes;
}
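readDirect above reaches the ByteBuffer read API added by HDFS-2834 through the JNI call read("(Ljava/nio/ByteBuffer;)I"). The Java side of that call is roughly this sketch (assumes an already-open FSDataInputStream in; the helper name is hypothetical):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.fs.FSDataInputStream;

    // A direct buffer wraps the caller's native memory, so bytes land there
    // without the extra byte[] copy the slow path does via GetByteArrayRegion.
    // Streams that cannot do ByteBuffer reads throw
    // UnsupportedOperationException, which errnoFromException maps to ENOTSUP.
    static int directRead(FSDataInputStream in, ByteBuffer directBuf)
        throws java.io.IOException {
      return in.read(directBuf); // -1 at EOF; handleReadResult turns that into 0
    }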
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.h?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.h (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs.h Sat Apr 7 22:53:55 2012
@@ -81,12 +81,16 @@ extern "C" {
};
+ // Bit fields for hdfsFile_internal flags
+ #define HDFS_FILE_SUPPORTS_DIRECT_READ (1<<0)
+
/**
* The 'file-handle' to a file in hdfs.
*/
struct hdfsFile_internal {
void* file;
enum hdfsStreamType type;
+ uint32_t flags;
};
typedef struct hdfsFile_internal* hdfsFile;
@@ -203,7 +207,6 @@ extern "C" {
*/
tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
-
/**
* hdfsPread - Positional read of data from an open file.
* @param fs The configured filesystem handle.
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs_test.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs_test.c?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs_test.c (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/hdfs_test.c Sat Apr 7 22:53:55 2012
@@ -18,6 +18,8 @@
#include "hdfs.h"
+tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length);
+
void permission_disp(short permissions, char *rtr) {
rtr[9] = '\0';
int i;
@@ -51,7 +53,6 @@ void permission_disp(short permissions,
}
int main(int argc, char **argv) {
-
hdfsFS fs = hdfsConnectNewInstance("default", 0);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
@@ -64,20 +65,25 @@ int main(int argc, char **argv) {
exit(-1);
}
- const char* writePath = "/tmp/testfile.txt";
+ const char* writePath = "/tmp/testfile.txt";
+ const char* fileContents = "Hello, World!";
+
{
//Write tests
-
hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
exit(-1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
-
- char* buffer = "Hello, World!";
- tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+ tSize num_written_bytes =
+ hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents)+1);
+ if (num_written_bytes != strlen(fileContents) + 1) {
+ fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
+ (int)(strlen(fileContents) + 1), (int)num_written_bytes);
+ exit(-1);
+ }
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
tOffset currentPos = -1;
@@ -138,18 +144,86 @@ int main(int argc, char **argv) {
}
fprintf(stderr, "Current position: %ld\n", currentPos);
+ if ((readFile->flags & HDFS_FILE_SUPPORTS_DIRECT_READ) == 0) {
+ fprintf(stderr, "Direct read support incorrectly not detected "
+ "for HDFS filesystem\n");
+ exit(-1);
+ }
+
+ fprintf(stderr, "Direct read support detected for HDFS\n");
+
+ // Clear flags so that we really go through slow read path
+ readFile->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_READ;
+
static char buffer[32];
tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
sizeof(buffer));
fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, buffer);
+ memset(buffer, 0, strlen(fileContents) + 1);
+
num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer,
sizeof(buffer));
fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, buffer);
+ if (hdfsSeek(fs, readFile, 0L)) {
+ fprintf(stderr,
+ "Failed to seek to file start for direct read test!\n");
+ exit(-1);
+ }
+
+ readFile->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
+
+ memset(buffer, 0, strlen(fileContents) + 1);
+ num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+ sizeof(buffer));
+ if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+ fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
+ fileContents, buffer, num_read_bytes);
+ exit(-1);
+ }
+ fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
+ num_read_bytes, buffer);
hdfsCloseFile(fs, readFile);
+
+ // Test correct behaviour for unsupported filesystems
+ hdfsFile localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ if(!localFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ exit(-1);
+ }
+
+ tSize num_written_bytes = hdfsWrite(lfs, localFile,
+ (void*)fileContents,
+ strlen(fileContents) + 1);
+
+ hdfsCloseFile(lfs, localFile);
+ localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
+
+ if (localFile->flags & HDFS_FILE_SUPPORTS_DIRECT_READ) {
+ fprintf(stderr, "Direct read support incorrectly detected for local "
+ "filesystem\n");
+ exit(-1);
+ }
+
+ memset(buffer, 0, strlen(fileContents) + 1);
+ int result = readDirect(lfs, localFile, (void*)buffer, sizeof(buffer));
+ if (result != -1) {
+ fprintf(stderr, "Expected error from local direct read not seen!\n");
+ exit(-1);
+ }
+
+ if (errno != ENOTSUP) {
+ fprintf(stderr, "Error code not correctly set to ENOTSUP, was %d!\n",
+ errno);
+ exit(-1);
+ }
+
+ fprintf(stderr, "Expected exception thrown for unsupported direct read\n");
+
+ hdfsCloseFile(lfs, localFile);
}
int totalResult = 0;
@@ -446,4 +520,3 @@ int main(int argc, char **argv) {
/**
* vim: ts=4: sw=4: et:
*/
-
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh Sat Apr 7 22:53:55 2012
@@ -17,126 +17,64 @@
#
#
-# Note: This script depends on 8 environment variables to function correctly:
-# a) CLASSPATH
-# b) HADOOP_PREFIX
-# c) HADOOP_CONF_DIR
-# d) HADOOP_LOG_DIR
-# e) LIBHDFS_BUILD_DIR
-# f) LIBHDFS_INSTALL_DIR
-# g) OS_NAME
-# h) CLOVER_JAR
-# i} HADOOP_VERSION
-# j) HADOOP_HDFS_HOME
-# All these are passed by build.xml.
+# Note: This script depends on 5 environment variables to function correctly:
+# a) HADOOP_HOME - must be set
+# b) HDFS_TEST_CONF_DIR - optional; the directory to read and write
+# core-site.xml to. Defaults to /tmp
+# c) LIBHDFS_BUILD_DIR - optional; the location of the hdfs_test
+# executable. Defaults to the parent directory.
+# d) OS_NAME - used to choose how to locate libjvm.so
+# e) CLOVER_JAR - optional; the location of the Clover code coverage tool's jar.
#
-HDFS_TEST=hdfs_test
-HADOOP_LIB_DIR=$HADOOP_PREFIX/lib
-HADOOP_BIN_DIR=$HADOOP_PREFIX/bin
-
-COMMON_BUILD_DIR=$HADOOP_PREFIX/build/ivy/lib/hadoop-hdfs/common
-COMMON_JAR=$COMMON_BUILD_DIR/hadoop-common-$HADOOP_VERSION.jar
+if [ "x$HADOOP_HOME" == "x" ]; then
+ echo "HADOOP_HOME is unset!"
+ exit 1
+fi
-cat > $HADOOP_CONF_DIR/core-site.xml <<EOF
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-<property>
- <name>hadoop.tmp.dir</name>
- <value>file:///$LIBHDFS_TEST_DIR</value>
-</property>
-<property>
- <name>fs.default.name</name>
- <value>hdfs://localhost:23000/</value>
-</property>
-</configuration>
-EOF
-
-cat > $HADOOP_CONF_DIR/hdfs-site.xml <<EOF
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-<property>
- <name>dfs.replication</name>
- <value>1</value>
-</property>
-<property>
- <name>dfs.support.append</name>
- <value>true</value>
-</property>
-<property>
- <name>dfs.namenode.logging.level</name>
- <value>DEBUG</value>
-</property>
-</configuration>
-EOF
-
-cat > $HADOOP_CONF_DIR/slaves <<EOF
-localhost
-EOF
-
-# If we are running from the hdfs repo we need to make sure
-# HADOOP_BIN_DIR contains the common scripts.
-# If the bin directory does not and we've got a common jar extract its
-# bin directory to HADOOP_PREFIX/bin. The bin scripts hdfs-config.sh and
-# hadoop-config.sh assume the bin directory is named "bin" and that it
-# is located in HADOOP_PREFIX.
-unpacked_common_bin_dir=0
-if [ ! -f $HADOOP_BIN_DIR/hadoop-config.sh ]; then
- if [ -f $COMMON_JAR ]; then
- jar xf $COMMON_JAR bin.tgz
- tar xfz bin.tgz -C $HADOOP_BIN_DIR
- unpacked_common_bin_dir=1
- fi
+if [ "x$LIBHDFS_BUILD_DIR" == "x" ]; then
+ LIBHDFS_BUILD_DIR=`pwd`/../
fi
-# Manipulate HADOOP_CONF_DIR too
-# which is necessary to circumvent bin/hadoop
-HADOOP_CONF_DIR=$HADOOP_CONF_DIR:$HADOOP_PREFIX/conf
+if [ "x$HDFS_TEST_CONF_DIR" == "x" ]; then
+ HDFS_TEST_CONF_DIR=/tmp
+fi
-# set pid file dir so they are not written to /tmp
-export HADOOP_PID_DIR=$HADOOP_LOG_DIR
+# LIBHDFS_INSTALL_DIR is the directory containing libhdfs.so
+LIBHDFS_INSTALL_DIR=$HADOOP_HOME/lib/native/
+HDFS_TEST=hdfs_test
-# CLASSPATH initially contains $HADOOP_CONF_DIR
-CLASSPATH="${HADOOP_CONF_DIR}"
-CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+HDFS_TEST_JAR=`find $HADOOP_HOME/share/hadoop/hdfs/ \
+-name "hadoop-hdfs-*-tests.jar" | head -n 1`
-# for developers, add Hadoop classes to CLASSPATH
-if [ -d "$HADOOP_PREFIX/build/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/classes
-fi
-if [ -d "$HADOOP_PREFIX/build/web/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/web
-fi
-if [ -d "$HADOOP_PREFIX/build/test/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/test/classes
+if [ "x$HDFS_TEST_JAR" == "x" ]; then
+ echo "HDFS test jar not found! Tried looking in all subdirectories \
+of $HADOOP_HOME/share/hadoop/hdfs/"
+ exit 1
fi
+echo "Found HDFS test jar at $HDFS_TEST_JAR"
+
+# CLASSPATH initially contains $HDFS_TEST_CONF_DIR
+CLASSPATH="${HDFS_TEST_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
# add Clover jar file needed for code coverage runs
CLASSPATH=${CLASSPATH}:${CLOVER_JAR};
# so that filenames w/ spaces are handled correctly in loops below
-IFS=
+IFS=$'\n'
-# add libs to CLASSPATH
-for f in $HADOOP_PREFIX/lib/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-for f in $HADOOP_PREFIX/*.jar; do
- CLASSPATH=${CLASSPATH}:$f
-done
-for f in $HADOOP_PREFIX/lib/jsp-2.1/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
-done
-
-if [ -d "$COMMON_BUILD_DIR" ]; then
- CLASSPATH=$CLASSPATH:$COMMON_JAR
- for f in $COMMON_BUILD_DIR/*.jar; do
- CLASSPATH=${CLASSPATH}:$f;
- done
-fi
+JAR_DIRS="$HADOOP_HOME/share/hadoop/common/lib/
+$HADOOP_HOME/share/hadoop/common/
+$HADOOP_HOME/share/hadoop/hdfs
+$HADOOP_HOME/share/hadoop/hdfs/lib/"
+
+for d in $JAR_DIRS; do
+  for j in $d/*.jar; do
+    CLASSPATH=${CLASSPATH}:$j
+  done
+done
# restore ordinary behaviour
unset IFS
@@ -178,21 +116,37 @@ echo LIB_JVM_DIR = $LIB_JVM_DIR
echo "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
# Put delays to ensure hdfs is up and running and also shuts down
# after the tests are complete
-cd $HADOOP_PREFIX
-echo Y | $HADOOP_BIN_DIR/hdfs namenode -format &&
-$HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start namenode && sleep 2
-$HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start datanode && sleep 2
-echo "Wait 30s for the datanode to start up..."
-sleep 30
-CLASSPATH=$CLASSPATH LD_PRELOAD="$LIB_JVM_DIR/libjvm.so:$LIBHDFS_INSTALL_DIR/libhdfs.so:" $LIBHDFS_BUILD_DIR/$HDFS_TEST
-BUILD_STATUS=$?
-sleep 3
-$HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs stop datanode && sleep 2
-$HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs stop namenode && sleep 2
+rm -f $HDFS_TEST_CONF_DIR/core-site.xml
-if [ $unpacked_common_bin_dir -eq 1 ]; then
- rm -rf bin.tgz
+$HADOOP_HOME/bin/hadoop jar $HDFS_TEST_JAR \
+ org.apache.hadoop.test.MiniDFSClusterManager \
+ -format -nnport 20300 -writeConfig $HDFS_TEST_CONF_DIR/core-site.xml \
+ > /tmp/libhdfs-test-cluster.out 2>&1 &
+
+MINI_CLUSTER_PID=$!
+for i in {1..15}; do
+ echo "Waiting for DFS cluster, attempt $i of 15"
+ [ -f $HDFS_TEST_CONF_DIR/core-site.xml ] && break;
+ sleep 2
+done
+
+if [ ! -f $HDFS_TEST_CONF_DIR/core-site.xml ]; then
+ echo "Cluster did not come up in 30s"
+ kill -9 $MINI_CLUSTER_PID
+ exit 1
fi
-echo exiting with $BUILD_STATUS
+echo "Cluster up, running tests"
+# Disable error checking to make sure we get to cluster cleanup
+set +e
+
+CLASSPATH=$CLASSPATH \
+LD_PRELOAD="$LIB_JVM_DIR/libjvm.so:$LIBHDFS_INSTALL_DIR/libhdfs.so:" \
+$LIBHDFS_BUILD_DIR/$HDFS_TEST
+
+BUILD_STATUS=$?
+
+echo "Tearing cluster down"
+kill -9 $MINI_CLUSTER_PID
+echo "Exiting with $BUILD_STATUS"
exit $BUILD_STATUS
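
The rewritten script no longer formats and babysits NameNode and DataNode
daemons itself; it hands cluster lifecycle to MiniDFSClusterManager and
simply polls for the core-site.xml the manager writes out. Tests driven from
Java can get the same effect in-process with MiniDFSCluster, as the HDFS
tests below do. A minimal sketch, assuming a default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Format and start a one-NameNode, one-DataNode cluster in-process.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)
            .format(true)
            .build();
        try {
          cluster.waitActive();          // block until the DataNode registers
          FileSystem fs = cluster.getFileSystem();
          fs.mkdirs(new Path("/tmp"));   // usable like any other HDFS
        } finally {
          cluster.shutdown();            // tear down NN and DN threads
        }
      }
    }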
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto Sat Apr 7 22:53:55 2012
@@ -36,16 +36,18 @@ message JournalInfoProto {
}
/**
- * JournalInfo - the information about the journal
+ * journalInfo - the information about the journal
* firstTxnId - the first txid in the journal records
* numTxns - Number of transactions in editlog
* records - bytes containing serialized journal records
+ * epoch - a change in this value indicates a change of journal writer
*/
message JournalRequestProto {
required JournalInfoProto journalInfo = 1;
required uint64 firstTxnId = 2;
required uint32 numTxns = 3;
required bytes records = 4;
+ required uint64 epoch = 5;
}
/**
@@ -55,12 +57,13 @@ message JournalResponseProto {
}
/**
- * JournalInfo - the information about the journal
+ * journalInfo - the information about the journal
* txid - first txid in the new log
*/
message StartLogSegmentRequestProto {
- required JournalInfoProto journalInfo = 1;
- required uint64 txid = 2;
+ required JournalInfoProto journalInfo = 1; // Info about the journal
+ required uint64 txid = 2; // Transaction ID
+ required uint64 epoch = 3;
}
/**
@@ -70,6 +73,27 @@ message StartLogSegmentResponseProto {
}
/**
+ * journalInfo - the information about the journal
+ * epoch - epoch of the fencing writer; must be greater than the current epoch
+ * fencerInfo - optional information about the fencer, for debugging
+ */
+message FenceRequestProto {
+ required JournalInfoProto journalInfo = 1; // Info about the journal
+ required uint64 epoch = 2; // Epoch - change indicates change in writer
+ optional string fencerInfo = 3; // Info about fencer for debugging
+}
+
+/**
+ * previousEpoch - the previous epoch, if any; zero otherwise
+ * lastTransactionId - last valid transaction Id in the journal
+ * inSync - whether all journal segments are available and in sync
+ */
+message FenceResponseProto {
+ optional uint64 previousEpoch = 1;
+ optional uint64 lastTransactionId = 2;
+ optional bool inSync = 3;
+}
+
+/**
* Protocol used to journal edits to a remote node. Currently,
* this is used to publish edits from the NameNode to a BackupNode.
*
@@ -89,4 +113,10 @@ service JournalProtocolService {
*/
rpc startLogSegment(StartLogSegmentRequestProto)
returns (StartLogSegmentResponseProto);
+
+ /**
+ * Request to fence a journal receiver.
+ */
+ rpc fence(FenceRequestProto)
+ returns (FenceResponseProto);
}
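
Because the fence request and response are plain protobuf messages, the
generated Java builders follow mechanically from the definitions above. A
sketch of building a request and inspecting a response (the JournalInfoProto
argument and the "zkfc-host1" fencer string are assumed values, and the
surrounding RPC plumbing is omitted):

    // Builder and accessor names are generated by protoc from the .proto above.
    static FenceRequestProto buildFenceRequest(JournalInfoProto journalInfo,
                                               long newEpoch) {
      return FenceRequestProto.newBuilder()
          .setJournalInfo(journalInfo)
          .setEpoch(newEpoch)            // must exceed the receiver's epoch
          .setFencerInfo("zkfc-host1")   // optional, for debugging only
          .build();
    }

    static void inspectFenceResponse(FenceResponseProto resp) {
      if (resp.hasPreviousEpoch()) {
        System.err.println("Displaced writer epoch: " + resp.getPreviousEpoch());
      }
      if (resp.hasInSync() && resp.getInSync()) {
        System.err.println("All journal segments available and in sync");
      }
    }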
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1310174-1310901
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1310174-1310901
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1310174-1310901
Propchange: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1310174-1310901
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java Sat Apr 7 22:53:55 2012
@@ -30,6 +30,7 @@ import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -48,13 +49,13 @@ import static org.junit.Assert.*;
public class TestViewFsFileStatusHdfs {
static final String testfilename = "/tmp/testFileStatusSerialziation";
+ static final String someFile = "/hdfstmp/someFileForTestGetFileChecksum";
-
-
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
private static Configuration CONF = new Configuration();
private static FileSystem fHdfs;
+ private static FileSystem vfs;
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
@@ -65,18 +66,19 @@ public class TestViewFsFileStatusHdfs {
defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fHdfs.mkdirs(defaultWorkingDirectory);
+
+ // Setup the ViewFS to be used for all tests.
+ Configuration conf = ViewFileSystemTestSetup.createConfig();
+ ConfigUtil.addLink(conf, "/vfstmp", new URI(fHdfs.getUri() + "/hdfstmp"));
+ ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri() + "/tmp"));
+ vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+ assertEquals(ViewFileSystem.class, vfs.getClass());
}
@Test
public void testFileStatusSerialziation()
throws IOException, URISyntaxException {
-
long len = FileSystemTestHelper.createFile(fHdfs, testfilename);
-
- Configuration conf = ViewFileSystemTestSetup.createConfig();
- ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri().toString() + "/tmp"));
- FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
- assertEquals(ViewFileSystem.class, vfs.getClass());
FileStatus stat = vfs.getFileStatus(new Path(testfilename));
assertEquals(len, stat.getLen());
// check serialization/deserialization
@@ -89,9 +91,34 @@ public class TestViewFsFileStatusHdfs {
assertEquals(len, deSer.getLen());
}
+ @Test
+ public void testGetFileChecksum() throws IOException, URISyntaxException {
+ // Create two different files in HDFS
+ FileSystemTestHelper.createFile(fHdfs, someFile);
+ FileSystemTestHelper.createFile(fHdfs, FileSystemTestHelper
+ .getTestRootPath(fHdfs, someFile + "other"), 1, 512);
+ // Get checksum through ViewFS
+ FileChecksum viewFSCheckSum = vfs.getFileChecksum(
+ new Path("/vfstmp/someFileForTestGetFileChecksum"));
+ // Get checksum through HDFS.
+ FileChecksum hdfsCheckSum = fHdfs.getFileChecksum(
+ new Path(someFile));
+ // Get checksum of different file in HDFS
+ FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(
+ new Path(someFile+"other"));
+    // Checksums of the same file (obtained through HDFS and ViewFS) should match
+ assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum,
+ hdfsCheckSum);
+ // Checksum of different files should be different.
+ assertFalse("Some other HDFS file which should not have had the same " +
+ "checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
+ }
+
@AfterClass
public static void cleanup() throws IOException {
fHdfs.delete(new Path(testfilename), true);
+ fHdfs.delete(new Path(someFile), true);
+ fHdfs.delete(new Path(someFile + "other"), true);
}
}
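
testGetFileChecksum holds because ViewFileSystem resolves a mount-table path
to the underlying HDFS file before delegating getFileChecksum, so both routes
reach the same blocks. Condensed from the test above, reusing its imports and
the running fHdfs instance:

    Configuration conf = ViewFileSystemTestSetup.createConfig();
    ConfigUtil.addLink(conf, "/vfstmp", new URI(fHdfs.getUri() + "/hdfstmp"));
    FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);

    // The same file, reached via the mount table and via HDFS directly.
    FileChecksum viaView = vfs.getFileChecksum(
        new Path("/vfstmp/someFileForTestGetFileChecksum"));
    FileChecksum direct = fHdfs.getFileChecksum(
        new Path("/hdfstmp/someFileForTestGetFileChecksum"));
    assertEquals(viaView, direct);   // same blocks, same checksum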
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/journalservice/TestJournalService.java Sat Apr 7 22:53:55 2012
@@ -20,12 +20,18 @@ package org.apache.hadoop.hdfs.server.jo
import java.io.IOException;
import java.net.InetSocketAddress;
+import junit.framework.Assert;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
+import org.apache.hadoop.hdfs.server.protocol.FencedException;
+import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.junit.Test;
import org.mockito.Mockito;
@@ -42,7 +48,7 @@ public class TestJournalService {
* called.
*/
@Test
- public void testCallBacks() throws IOException {
+ public void testCallBacks() throws Exception {
JournalListener listener = Mockito.mock(JournalListener.class);
JournalService service = null;
try {
@@ -51,6 +57,7 @@ public class TestJournalService {
service = startJournalService(listener);
verifyRollLogsCallback(service, listener);
verifyJournalCallback(service, listener);
+ verifyFence(service, cluster.getNameNode(0));
} finally {
if (service != null) {
service.stop();
@@ -93,4 +100,28 @@ public class TestJournalService {
Mockito.verify(l, Mockito.atLeastOnce()).journal(Mockito.eq(s),
Mockito.anyLong(), Mockito.anyInt(), (byte[]) Mockito.any());
}
+
+ public void verifyFence(JournalService s, NameNode nn) throws Exception {
+ String cid = nn.getNamesystem().getClusterId();
+ int nsId = nn.getNamesystem().getFSImage().getNamespaceID();
+ int lv = nn.getNamesystem().getFSImage().getLayoutVersion();
+
+ // Fence the journal service
+ JournalInfo info = new JournalInfo(lv, cid, nsId);
+ long currentEpoch = s.getEpoch();
+
+ // New epoch lower than the current epoch is rejected
+    try {
+      s.fence(info, (currentEpoch - 1), "fencer");
+      Assert.fail("Fencing with a lower epoch should have been rejected");
+    } catch (FencedException expected) { /* Expected */ }
+
+ // New epoch equal to the current epoch is rejected
+    try {
+      s.fence(info, currentEpoch, "fencer");
+      Assert.fail("Fencing with an equal epoch should have been rejected");
+    } catch (FencedException expected) { /* Expected */ }
+
+ // New epoch higher than the current epoch is successful
+ FenceResponse resp = s.fence(info, currentEpoch+1, "fencer");
+ Assert.assertNotNull(resp);
+ }
}
\ No newline at end of file
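
verifyFence pins down the epoch contract: a proposed epoch must be strictly
greater than the receiver's current epoch, and anything lower or equal comes
back as a FencedException. A minimal sketch of that comparison, with
illustrative names rather than the actual JournalService internals:

    // Illustrative only; the real check lives inside JournalService.fence().
    class EpochGuard {
      private long currentEpoch = 0;

      // Accepts a strictly higher epoch and returns the epoch it displaces;
      // lower or equal proposals are rejected, mirroring what the test
      // expects FencedException to signal.
      synchronized long fence(long proposedEpoch, String fencer) {
        if (proposedEpoch <= currentEpoch) {
          throw new IllegalStateException("Epoch " + proposedEpoch
              + " is not greater than current epoch " + currentEpoch
              + " (fencer: " + fencer + ")");
        }
        long previous = currentEpoch;
        currentEpoch = proposedEpoch;
        return previous;
      }
    }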
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java?rev=1310905&r1=1310904&r2=1310905&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java Sat Apr 7 22:53:55 2012
@@ -42,6 +42,8 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
+import com.google.common.base.Joiner;
+
/**
* Test for {@link GetConf}
*/
@@ -117,7 +119,12 @@ public class TestGetConf {
PrintStream out = new PrintStream(o, true);
try {
int ret = ToolRunner.run(new GetConf(conf, out, out), args);
- assertEquals(success, ret == 0);
+ out.flush();
+ System.err.println("Output: " + o.toString());
+ assertEquals("Expected " + (success?"success":"failure") +
+ " for args: " + Joiner.on(" ").join(args) + "\n" +
+ "Output: " + o.toString(),
+ success, ret == 0);
return o.toString();
} finally {
o.close();
@@ -222,7 +229,9 @@ public class TestGetConf {
getAddressListFromTool(TestType.SECONDARY, conf, false);
getAddressListFromTool(TestType.NNRPCADDRESSES, conf, false);
for (Command cmd : Command.values()) {
- CommandHandler handler = Command.getHandler(cmd.getName());
+ String arg = cmd.getName();
+ CommandHandler handler = Command.getHandler(arg);
+ assertNotNull("missing handler: " + cmd, handler);
if (handler.key != null) {
// First test with configuration missing the required key
String[] args = {handler.key};
@@ -319,18 +328,36 @@ public class TestGetConf {
verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
}
+
+ @Test
+ public void testGetSpecificKey() throws Exception {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.set("mykey", " myval ");
+ String[] args = {"-confKey", "mykey"};
+    assertEquals("myval\n", runTool(conf, args, true));
+ }
+
+ @Test
+ public void testExtraArgsThrowsError() throws Exception {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.set("mykey", "myval");
+ String[] args = {"-namenodes", "unexpected-arg"};
+ assertTrue(runTool(conf, args, false).contains(
+ "Did not expect argument: unexpected-arg"));
+ }
/**
* Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},
* {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
*/
+ @Test
public void testTool() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(false);
for (Command cmd : Command.values()) {
CommandHandler handler = Command.getHandler(cmd.getName());
- if (handler.key != null) {
+ if (handler.key != null && !"-confKey".equals(cmd.getName())) {
// Add the key to the conf and ensure tool returns the right value
- String[] args = {handler.key};
+ String[] args = {cmd.getName()};
conf.set(handler.key, "value");
assertTrue(runTool(conf, args, true).contains("value"));
}