Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/11/08 20:10:04 UTC
svn commit: r1407217 [4/7] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs:
./ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/
src/contrib/bkjournal/src/main/proto/ src/contrib/bkjournal/src/test/j...
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c Thu Nov 8 19:09:46 2012
@@ -22,97 +22,90 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <inttypes.h>
int main(int argc, char **argv) {
+ hdfsFS fs;
+ const char* writeFileName;
+ off_t fileTotalSize;
+ long long tmpBufferSize;
+ tSize bufferSize = 0, totalWriteSize = 0, toWrite = 0, written = 0;
+ hdfsFile writeFile = NULL;
+ int append, i = 0;
+ char* buffer = NULL;
if (argc != 6) {
- fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize> <username> <append>\n");
- exit(-1);
+ fprintf(stderr, "Usage: test_libwebhdfs_write <filename> <filesize> "
+ "<buffersize> <username> <append>\n");
+ exit(1);
}
- hdfsFS fs = hdfsConnectAsUser("0.0.0.0", 50070, argv[4]);
+ fs = hdfsConnectAsUser("default", 50070, argv[4]);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
- exit(-1);
+ exit(1);
}
- const char* writeFileName = argv[1];
- off_t fileTotalSize = strtoul(argv[2], NULL, 10);
- long long tmpBufferSize = strtoul(argv[3], NULL, 10);
+ writeFileName = argv[1];
+ fileTotalSize = strtoul(argv[2], NULL, 10);
+ tmpBufferSize = strtoul(argv[3], NULL, 10);
// sanity check
if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
- fprintf(stderr, "invalid file size %s - must be <= %lu\n", argv[2], ULONG_MAX);
- exit(-3);
+ fprintf(stderr, "invalid file size %s - must be <= %lu\n",
+ argv[2], ULONG_MAX);
+ exit(1);
}
// currently libhdfs writes are of tSize which is int32
if(tmpBufferSize > INT_MAX) {
- fprintf(stderr, "invalid buffer size libhdfs API write chunks must be <= %d\n",INT_MAX);
- exit(-3);
+ fprintf(stderr,
+ "invalid buffer size libhdfs API write chunks must be <= %d\n",
+ INT_MAX);
+ exit(1);
}
- tSize bufferSize = tmpBufferSize;
-
- hdfsFile writeFile = NULL;
- int append = atoi(argv[5]);
+ bufferSize = (tSize) tmpBufferSize;
+ append = atoi(argv[5]);
if (!append) {
writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 2, 0);
} else {
- writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, bufferSize, 2, 0);
+ writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND,
+ bufferSize, 2, 0);
}
if (!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
- exit(-2);
+ exit(1);
}
// data to be written to the file
- char* buffer = malloc(sizeof(char) * bufferSize + 1);
+ buffer = malloc(sizeof(char) * bufferSize + 1);
if(buffer == NULL) {
fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
- return -2;
+ exit(1);
}
- int i = 0;
- for (i=0; i < bufferSize; ++i) {
+ for (i = 0; i < bufferSize; ++i) {
buffer[i] = 'a' + (i%26);
}
buffer[bufferSize] = '\0';
- size_t totalWriteSize = 0;
+ // write to the file
+ totalWriteSize = 0;
for (; totalWriteSize < fileTotalSize; ) {
- tSize toWrite = bufferSize < (fileTotalSize - totalWriteSize) ? bufferSize : (fileTotalSize - totalWriteSize);
- size_t written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite);
- fprintf(stderr, "written size %ld, to write size %d\n", written, toWrite);
+ toWrite = bufferSize < (fileTotalSize - totalWriteSize) ?
+ bufferSize : (fileTotalSize - totalWriteSize);
+ written = hdfsWrite(fs, writeFile, (void*)buffer, toWrite);
+ fprintf(stderr, "written size %d, to write size %d\n",
+ written, toWrite);
totalWriteSize += written;
- //sleep(1);
}
+ // cleanup
free(buffer);
hdfsCloseFile(fs, writeFile);
-
- fprintf(stderr, "file total size: %lld, total write size: %ld\n", fileTotalSize, totalWriteSize);
-
- hdfsFile readFile = hdfsOpenFile(fs, writeFileName, O_RDONLY, 0, 0, 0);
- //sleep(1);
- fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
-
- hdfsFile writeFile2 = hdfsOpenFile(fs, writeFileName, O_WRONLY | O_APPEND, 0, 2, 0);
- fprintf(stderr, "Opened %s for writing successfully...\n", writeFileName);
- const char *content = "Hello, World!";
- size_t num_written_bytes = hdfsWrite(fs, writeFile2, content, strlen(content) + 1);
- if (num_written_bytes != strlen(content) + 1) {
- fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
- (int)(strlen(content) + 1), (int)num_written_bytes);
- exit(-1);
- }
- fprintf(stderr, "Wrote %zd bytes\n", num_written_bytes);
-
+ fprintf(stderr, "file total size: %" PRId64 ", total write size: %d\n",
+ fileTotalSize, totalWriteSize);
hdfsDisconnect(fs);
return 0;
}
-
-/**
- * vim: ts=4: sw=4: et:
- */
-
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1401063-1407201
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu Nov 8 19:09:46 2012
@@ -652,7 +652,7 @@ public class DFSClient implements java.i
// if there are no more clients under the renewer.
getLeaseRenewer().closeClient(this);
} catch (IOException ioe) {
- LOG.info("Exception occurred while aborting the client. " + ioe);
+ LOG.info("Exception occurred while aborting the client " + ioe);
}
closeConnectionToNamenode();
}
@@ -1769,6 +1769,13 @@ public class DFSClient implements java.i
return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
crcPerBlock, fileMD5);
default:
+ // If there is no block allocated for the file,
+ // return one with the magic entry that matches what previous
+ // hdfs versions return.
+ if (locatedblocks.size() == 0) {
+ return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
+ }
+
// we should never get here since the validity was checked
// when getCrcType() was called above.
return null;
@@ -2104,7 +2111,7 @@ public class DFSClient implements java.i
reportBadBlocks(lblocks);
} catch (IOException ie) {
LOG.info("Found corruption while reading " + file
- + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
+ + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
}
}
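The added default-case branch is the substance of this hunk: a file with no allocated blocks has no per-block CRC to infer a checksum type from, so getFileChecksum() now falls back to the zeroed Gzip-flavored checksum that earlier HDFS versions returned for empty files, instead of returning null. A minimal sketch of that decision, using a stand-in return type rather than the real MD5MD5CRC32* classes:

    import java.util.Collections;
    import java.util.List;

    class ZeroBlockChecksumSketch {
        // Stand-in for the MD5MD5CRC32* FileChecksum hierarchy in the diff.
        static String checksumFor(List<?> locatedBlocks) {
            if (locatedBlocks.size() == 0) {
                // No block allocated: return the "magic" zeroed entry that
                // matches what pre-change HDFS versions produced.
                return "MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5)";
            }
            // Otherwise the CRC type read from the blocks picks the class.
            return "checksum class chosen by the blocks' CRC type";
        }

        public static void main(String[] args) {
            System.out.println(checksumFor(Collections.emptyList()));
        }
    }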
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Thu Nov 8 19:09:46 2012
@@ -457,7 +457,7 @@ public class DFSInputStream extends FSIn
buffersize, verifyChecksum, dfsClient.clientName);
if(connectFailedOnce) {
DFSClient.LOG.info("Successfully connected to " + targetAddr +
- " for block " + blk.getBlockId());
+ " for " + blk);
}
return chosenNode;
} catch (IOException ex) {
@@ -736,9 +736,9 @@ public class DFSInputStream extends FSIn
}
if (nodes == null || nodes.length == 0) {
- DFSClient.LOG.info("No node available for block: " + blockInfo);
+ DFSClient.LOG.info("No node available for " + blockInfo);
}
- DFSClient.LOG.info("Could not obtain block " + block.getBlock()
+ DFSClient.LOG.info("Could not obtain " + block.getBlock()
+ " from any node: " + ie
+ ". Will get new block locations from namenode and retry...");
try {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Thu Nov 8 19:09:46 2012
@@ -735,7 +735,7 @@ public class DFSOutputStream extends FSO
//
private boolean processDatanodeError() throws IOException {
if (response != null) {
- DFSClient.LOG.info("Error Recovery for block " + block +
+ DFSClient.LOG.info("Error Recovery for " + block +
" waiting for responder to exit. ");
return true;
}
@@ -1008,7 +1008,7 @@ public class DFSOutputStream extends FSO
success = createBlockOutputStream(nodes, 0L, false);
if (!success) {
- DFSClient.LOG.info("Abandoning block " + block);
+ DFSClient.LOG.info("Abandoning " + block);
dfsClient.namenode.abandonBlock(block, src, dfsClient.clientName);
block = null;
DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
@@ -1773,7 +1773,7 @@ public class DFSOutputStream extends FSO
try {
Thread.sleep(400);
if (Time.now() - localstart > 5000) {
- DFSClient.LOG.info("Could not complete file " + src + " retrying...");
+ DFSClient.LOG.info("Could not complete " + src + " retrying...");
}
} catch (InterruptedException ie) {
DFSClient.LOG.warn("Caught exception ", ie);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Nov 8 19:09:46 2012
@@ -702,7 +702,7 @@ public class DistributedFileSystem exten
}
DatanodeInfo[] dataNode = {dfsIn.getCurrentDatanode()};
lblocks[0] = new LocatedBlock(dataBlock, dataNode);
- LOG.info("Found checksum error in data stream at block="
+ LOG.info("Found checksum error in data stream at "
+ dataBlock + " on datanode="
+ dataNode[0]);
@@ -715,7 +715,7 @@ public class DistributedFileSystem exten
}
DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()};
lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
- LOG.info("Found checksum error in checksum stream at block="
+ LOG.info("Found checksum error in checksum stream at "
+ sumsBlock + " on datanode=" + sumsNode[0]);
// Ask client to delete blocks.
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java Thu Nov 8 19:09:46 2012
@@ -157,11 +157,11 @@ public abstract class HdfsProtoUtil {
}
public static DataChecksum.Type fromProto(HdfsProtos.ChecksumTypeProto type) {
- return DataChecksum.Type.valueOf(type.name());
+ return DataChecksum.Type.valueOf(type.getNumber());
}
public static HdfsProtos.ChecksumTypeProto toProto(DataChecksum.Type type) {
- return HdfsProtos.ChecksumTypeProto.valueOf(type.name());
+ return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
}
public static InputStream vintPrefixed(final InputStream input)
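The swap from name() to the numeric id is the point of these two hunks: the protobuf enum and DataChecksum.Type are now matched on their stable wire numbers instead of on Java identifier spelling, so renaming a constant on either side can no longer silently break the mapping. A sketch of the pattern with stand-in enums (the real protobuf-generated enum exposes getNumber() and an int-taking valueOf in the same way):

    class ChecksumEnumMappingSketch {
        // Stand-in for the protobuf-generated ChecksumTypeProto.
        enum ChecksumTypeProto {
            NULL(0), CRC32(1), CRC32C(2);
            private final int number;
            ChecksumTypeProto(int number) { this.number = number; }
            int getNumber() { return number; }
            static ChecksumTypeProto valueOf(int number) {
                for (ChecksumTypeProto t : values()) {
                    if (t.number == number) return t;
                }
                throw new IllegalArgumentException("unknown checksum id " + number);
            }
        }

        // Stand-in for DataChecksum.Type, which carries a matching id field.
        enum Type {
            NULL(0), CRC32(1), CRC32C(2);
            final int id;
            Type(int id) { this.id = id; }
            static Type valueOf(int id) { return values()[id]; } // ids are dense
        }

        static Type fromProto(ChecksumTypeProto proto) {
            return Type.valueOf(proto.getNumber()); // by number, not name()
        }

        static ChecksumTypeProto toProto(Type type) {
            return ChecksumTypeProto.valueOf(type.id);
        }
    }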
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java Thu Nov 8 19:09:46 2012
@@ -52,7 +52,7 @@ public abstract class DataTransferProtoU
}
public static ChecksumProto toProto(DataChecksum checksum) {
- ChecksumTypeProto type = ChecksumTypeProto.valueOf(checksum.getChecksumType().name());
+ ChecksumTypeProto type = HdfsProtoUtil.toProto(checksum.getChecksumType());
if (type == null) {
throw new IllegalArgumentException(
"Can't convert checksum to protobuf: " + checksum);
@@ -68,7 +68,7 @@ public abstract class DataTransferProtoU
if (proto == null) return null;
int bytesPerChecksum = proto.getBytesPerChecksum();
- DataChecksum.Type type = DataChecksum.Type.valueOf(proto.getType().name());
+ DataChecksum.Type type = HdfsProtoUtil.fromProto(proto.getType());
return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Thu Nov 8 19:09:46 2012
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
@@ -67,7 +68,6 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -129,7 +129,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.security.token.Token;
import com.google.protobuf.ByteString;
@@ -961,7 +960,7 @@ public class PBHelper {
fs.getFileBufferSize(),
fs.getEncryptDataTransfer(),
fs.getTrashInterval(),
- DataChecksum.Type.valueOf(fs.getChecksumType().name()));
+ HdfsProtoUtil.fromProto(fs.getChecksumType()));
}
public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@@ -974,7 +973,7 @@ public class PBHelper {
.setFileBufferSize(fs.getFileBufferSize())
.setEncryptDataTransfer(fs.getEncryptDataTransfer())
.setTrashInterval(fs.getTrashInterval())
- .setChecksumType(ChecksumTypeProto.valueOf(fs.getChecksumType().name()))
+ .setChecksumType(HdfsProtoUtil.toProto(fs.getChecksumType()))
.build();
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java Thu Nov 8 19:09:46 2012
@@ -171,8 +171,7 @@ class JNStorage extends Storage {
void format(NamespaceInfo nsInfo) throws IOException {
setStorageInfo(nsInfo);
- LOG.info("Formatting journal storage directory " +
- sd + " with nsid: " + getNamespaceID());
+ LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
// Unlock the directory before formatting, because we will
// re-analyze it after format(). The analyzeStorage() call
// below is responsible for re-locking it. This is a no-op
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java Thu Nov 8 19:09:46 2012
@@ -35,6 +35,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -230,6 +231,7 @@ public class JournalNode implements Tool
}
public static void main(String[] args) throws Exception {
+ StringUtils.startupShutdownMessage(JournalNode.class, args, LOG);
System.exit(ToolRunner.run(new JournalNode(), args));
}
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Thu Nov 8 19:09:46 2012
@@ -950,8 +950,8 @@ public class BlockManager {
datanodes.append(node).append(" ");
}
if (datanodes.length() != 0) {
- NameNode.stateChangeLog.info("BLOCK* addToInvalidates: "
- + b + " to " + datanodes.toString());
+ NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " + b + " "
+ + datanodes);
}
}
@@ -972,7 +972,7 @@ public class BlockManager {
// thread of Datanode reports bad block before Block reports are sent
// by the Datanode on startup
NameNode.stateChangeLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
- + blk + " not found.");
+ + blk + " not found");
return;
}
markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason), dn);
@@ -1026,7 +1026,7 @@ public class BlockManager {
NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: postponing " +
"invalidation of " + b + " on " + dn + " because " +
nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
- "with potentially out-of-date block reports.");
+ "with potentially out-of-date block reports");
postponeBlock(b.corrupted);
} else if (nr.liveReplicas() >= 1) {
@@ -1039,7 +1039,7 @@ public class BlockManager {
}
} else {
NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + b
- + " on " + dn + " is the only copy and was not deleted.");
+ + " on " + dn + " is the only copy and was not deleted");
}
}
@@ -1160,9 +1160,8 @@ public class BlockManager {
(blockHasEnoughRacks(block)) ) {
neededReplications.remove(block, priority); // remove from neededReplications
neededReplications.decrementReplicationIndex(priority);
- NameNode.stateChangeLog.info("BLOCK* "
- + "Removing block " + block
- + " from neededReplications as it has enough replicas.");
+ NameNode.stateChangeLog.info("BLOCK* Removing " + block
+ + " from neededReplications as it has enough replicas");
continue;
}
}
@@ -1236,9 +1235,8 @@ public class BlockManager {
neededReplications.remove(block, priority); // remove from neededReplications
neededReplications.decrementReplicationIndex(priority);
rw.targets = null;
- NameNode.stateChangeLog.info("BLOCK* "
- + "Removing block " + block
- + " from neededReplications as it has enough replicas.");
+ NameNode.stateChangeLog.info("BLOCK* Removing " + block
+ + " from neededReplications as it has enough replicas");
continue;
}
}
@@ -1290,10 +1288,8 @@ public class BlockManager {
targetList.append(' ');
targetList.append(targets[k]);
}
- NameNode.stateChangeLog.info(
- "BLOCK* ask "
- + rw.srcNode + " to replicate "
- + rw.block + " to " + targetList);
+ NameNode.stateChangeLog.info("BLOCK* ask " + rw.srcNode
+ + " to replicate " + rw.block + " to " + targetList);
}
}
}
@@ -1527,10 +1523,9 @@ public class BlockManager {
boolean staleBefore = node.areBlockContentsStale();
node.receivedBlockReport();
if (staleBefore && !node.areBlockContentsStale()) {
- LOG.info("BLOCK* processReport: " +
- "Received first block report from " + node +
- " after becoming active. Its block contents are no longer" +
- " considered stale.");
+ LOG.info("BLOCK* processReport: Received first block report from "
+ + node + " after becoming active. Its block contents are no longer"
+ + " considered stale");
rescanPostponedMisreplicatedBlocks();
}
@@ -1601,9 +1596,9 @@ public class BlockManager {
addStoredBlock(b, node, null, true);
}
for (Block b : toInvalidate) {
- NameNode.stateChangeLog.info("BLOCK* processReport: block "
+ NameNode.stateChangeLog.info("BLOCK* processReport: "
+ b + " on " + node + " size " + b.getNumBytes()
- + " does not belong to any file.");
+ + " does not belong to any file");
addToInvalidates(b, node);
}
for (BlockToMarkCorrupt b : toCorrupt) {
@@ -1870,7 +1865,7 @@ assert storedBlock.findDatanode(dn) < 0
int count = pendingDNMessages.count();
if (count > 0) {
LOG.info("Processing " + count + " messages from DataNodes " +
- "that were previously queued during standby state.");
+ "that were previously queued during standby state");
}
processQueuedMessages(pendingDNMessages.takeAll());
assert pendingDNMessages.count() == 0;
@@ -1927,9 +1922,9 @@ assert storedBlock.findDatanode(dn) < 0
// the block report got a little bit delayed after the pipeline
// closed. So, ignore this report, assuming we will get a
// FINALIZED replica later. See HDFS-2791
- LOG.info("Received an RBW replica for block " + storedBlock +
- " on " + dn + ": ignoring it, since the block is " +
- "complete with the same generation stamp.");
+ LOG.info("Received an RBW replica for " + storedBlock +
+ " on " + dn + ": ignoring it, since it is " +
+ "complete with the same genstamp");
return null;
} else {
return new BlockToMarkCorrupt(storedBlock,
@@ -2041,7 +2036,7 @@ assert storedBlock.findDatanode(dn) < 0
// If this block does not belong to any file, then we are done.
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+ node + " size " + block.getNumBytes()
- + " but it does not belong to any file.");
+ + " but it does not belong to any file");
// we could add this block to invalidate set of this datanode.
// it will happen in next block report otherwise.
return block;
@@ -2158,9 +2153,8 @@ assert storedBlock.findDatanode(dn) < 0
try {
invalidateBlock(new BlockToMarkCorrupt(blk, null), node);
} catch (IOException e) {
- NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
- "error in deleting bad block " + blk +
- " on " + node, e);
+ NameNode.stateChangeLog.info("invalidateCorruptReplicas "
+ + "error in deleting bad block " + blk + " on " + node, e);
gotException = true;
}
}
@@ -2308,7 +2302,7 @@ assert storedBlock.findDatanode(dn) < 0
DatanodeDescriptor cur = it.next();
if (cur.areBlockContentsStale()) {
LOG.info("BLOCK* processOverReplicatedBlock: " +
- "Postponing processing of over-replicated block " +
+ "Postponing processing of over-replicated " +
block + " since datanode " + cur + " does not yet have up-to-date " +
"block information.");
postponeBlock(block);
@@ -2398,7 +2392,7 @@ assert storedBlock.findDatanode(dn) < 0
//
addToInvalidates(b, cur);
NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
- +"("+cur+", "+b+") is added to invalidated blocks set.");
+ +"("+cur+", "+b+") is added to invalidated blocks set");
}
}
@@ -2540,7 +2534,7 @@ assert storedBlock.findDatanode(dn) < 0
for (Block b : toInvalidate) {
NameNode.stateChangeLog.info("BLOCK* addBlock: block "
+ b + " on " + node + " size " + b.getNumBytes()
- + " does not belong to any file.");
+ + " does not belong to any file");
addToInvalidates(b, node);
}
for (BlockToMarkCorrupt b : toCorrupt) {
@@ -2651,7 +2645,7 @@ assert storedBlock.findDatanode(dn) < 0
* of live nodes. If in startup safemode (or its 30-sec extension period),
* then it gains speed by ignoring issues of excess replicas or nodes
* that are decommissioned or in the process of becoming decommissioned.
- * If not in startup, then it calls {@link countNodes()} instead.
+ * If not in startup, then it calls {@link #countNodes(Block)} instead.
*
* @param b - the block being tested
* @return count of live nodes for this block
@@ -2702,6 +2696,7 @@ assert storedBlock.findDatanode(dn) < 0
void processOverReplicatedBlocksOnReCommission(
final DatanodeDescriptor srcNode) {
final Iterator<? extends Block> it = srcNode.getBlockIterator();
+ int numOverReplicated = 0;
while(it.hasNext()) {
final Block block = it.next();
BlockCollection bc = blocksMap.getBlockCollection(block);
@@ -2711,8 +2706,11 @@ assert storedBlock.findDatanode(dn) < 0
if (numCurrentReplica > expectedReplication) {
// over-replicated block
processOverReplicatedBlock(block, expectedReplication, null, null);
+ numOverReplicated++;
}
}
+ LOG.info("Invalidated " + numOverReplicated + " over-replicated blocks on " +
+ srcNode + " during recommissioning");
}
/**
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Thu Nov 8 19:09:46 2012
@@ -185,7 +185,7 @@ public class BlockPlacementPolicyDefault
if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
return writer;
}
- int totalReplicasExpected = numOfReplicas;
+ int totalReplicasExpected = numOfReplicas + results.size();
int numOfResults = results.size();
boolean newBlock = (numOfResults==0);
@@ -231,7 +231,8 @@ public class BlockPlacementPolicyDefault
maxNodesPerRack, results, avoidStaleNodes);
} catch (NotEnoughReplicasException e) {
LOG.warn("Not able to place enough replicas, still in need of "
- + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
+ + (totalReplicasExpected - results.size()) + " to reach "
+ + totalReplicasExpected + "\n"
+ e.getMessage());
if (avoidStaleNodes) {
// excludedNodes now has - initial excludedNodes, any nodes that were
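The one-line arithmetic change above is easy to miss: numOfReplicas counts only the additional replicas requested, so the expected total has to include the nodes already sitting in results, and the shortfall logged on NotEnoughReplicasException is the difference against the final results size. Reduced to a sketch:

    class ReplicaShortfallSketch {
        /** Replicas still needed after a placement attempt (illustrative). */
        static int stillNeeded(int numOfReplicas, int alreadyChosen, int placedNow) {
            int totalReplicasExpected = numOfReplicas + alreadyChosen;
            return totalReplicasExpected - (alreadyChosen + placedNow);
        }

        public static void main(String[] args) {
            // Asked for 2 more replicas with 1 already chosen; placement found 1:
            // still in need of 1 to reach the expected total of 3.
            System.out.println(stillNeeded(2, 1, 1));
        }
    }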
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Thu Nov 8 19:09:46 2012
@@ -362,8 +362,7 @@ public class DatanodeDescriptor extends
void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
if(recoverBlocks.contains(block)) {
// this prevents adding the same block twice to the recovery queue
- BlockManager.LOG.info("Block " + block +
- " is already in the recovery queue.");
+ BlockManager.LOG.info(block + " is already in the recovery queue");
return;
}
recoverBlocks.offer(block);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Thu Nov 8 19:09:46 2012
@@ -584,7 +584,7 @@ public class DatanodeManager {
if (node.isDecommissionInProgress()) {
if (!blockManager.isReplicationInProgress(node)) {
node.setDecommissioned();
- LOG.info("Decommission complete for node " + node);
+ LOG.info("Decommission complete for " + node);
}
}
return node.isDecommissioned();
@@ -593,8 +593,8 @@ public class DatanodeManager {
/** Start decommissioning the specified datanode. */
private void startDecommission(DatanodeDescriptor node) {
if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
- LOG.info("Start Decommissioning node " + node + " with " +
- node.numBlocks() + " blocks.");
+ LOG.info("Start Decommissioning " + node + " with " +
+ node.numBlocks() + " blocks");
heartbeatManager.startDecommission(node);
node.decommissioningStatus.setStartTime(now());
@@ -606,9 +606,13 @@ public class DatanodeManager {
/** Stop decommissioning the specified datanodes. */
void stopDecommission(DatanodeDescriptor node) {
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
- LOG.info("Stop Decommissioning node " + node);
+ LOG.info("Stop Decommissioning " + node);
heartbeatManager.stopDecommission(node);
- blockManager.processOverReplicatedBlocksOnReCommission(node);
+ // Over-replicated blocks will be detected and processed when
+ // the dead node comes back and sends in its full block report.
+ if (node.isAlive) {
+ blockManager.processOverReplicatedBlocksOnReCommission(node);
+ }
}
}
@@ -658,17 +662,15 @@ public class DatanodeManager {
throw new DisallowedDatanodeException(nodeReg);
}
- NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
- + "node registration from " + nodeReg
- + " storage " + nodeReg.getStorageID());
+ NameNode.stateChangeLog.info("BLOCK* registerDatanode: from "
+ + nodeReg + " storage " + nodeReg.getStorageID());
DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
nodeReg.getIpAddr(), nodeReg.getXferPort());
if (nodeN != null && nodeN != nodeS) {
- NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
- + "node from name: " + nodeN);
+ NameNode.LOG.info("BLOCK* registerDatanode: " + nodeN);
// nodeN previously served a different data storage,
// which is not served by anybody anymore.
removeDatanode(nodeN);
@@ -683,8 +685,8 @@ public class DatanodeManager {
// storage. We do not need to remove old data blocks, the delta will
// be calculated on the next block report from the datanode
if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.registerDatanode: "
- + "node restarted.");
+ NameNode.stateChangeLog.debug("BLOCK* registerDatanode: "
+ + "node restarted.");
}
} else {
// nodeS is found
@@ -696,11 +698,9 @@ public class DatanodeManager {
value in "VERSION" file under the data directory of the datanode,
but this might not work if the VERSION file format has changed
*/
- NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
- + "node " + nodeS
- + " is replaced by " + nodeReg +
- " with the same storageID " +
- nodeReg.getStorageID());
+ NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS
+ + " is replaced by " + nodeReg + " with the same storageID "
+ + nodeReg.getStorageID());
}
// update cluster map
getNetworkTopology().remove(nodeS);
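The isAlive guard added to stopDecommission (together with the counter added to processOverReplicatedBlocksOnReCommission in BlockManager above) is the behavioral change in this file: a dead node's block list is not scanned for over-replication on recommission, because the same blocks are found anyway from the full block report the node sends when it rejoins. As a sketch with stand-in types:

    class RecommissionSketch {
        static class Node {
            boolean decommissionInProgress;
            boolean isAlive;
        }

        /** Stop decommissioning; only live nodes get an over-replication scan. */
        static void stopDecommission(Node node) {
            if (node.decommissionInProgress) {
                node.decommissionInProgress = false;
                // Dead nodes are skipped: their over-replicated blocks are
                // detected when they come back and send a full block report.
                if (node.isAlive) {
                    processOverReplicatedBlocksOnReCommission(node);
                }
            }
        }

        static void processOverReplicatedBlocksOnReCommission(Node node) {
            // stand-in for the BlockManager scan
        }
    }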
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Thu Nov 8 19:09:46 2012
@@ -433,7 +433,7 @@ public abstract class Storage extends St
if (!root.exists()) {
// storage directory does not exist
if (startOpt != StartupOption.FORMAT) {
- LOG.info("Storage directory " + rootPath + " does not exist.");
+ LOG.info("Storage directory " + rootPath + " does not exist");
return StorageState.NON_EXISTENT;
}
LOG.info(rootPath + " does not exist. Creating ...");
@@ -442,7 +442,7 @@ public abstract class Storage extends St
}
// or is inaccessible
if (!root.isDirectory()) {
- LOG.info(rootPath + "is not a directory.");
+ LOG.info(rootPath + " is not a directory");
return StorageState.NON_EXISTENT;
}
if (!root.canWrite()) {
@@ -539,34 +539,34 @@ public abstract class Storage extends St
switch(curState) {
case COMPLETE_UPGRADE: // mv previous.tmp -> previous
LOG.info("Completing previous upgrade for storage directory "
- + rootPath + ".");
+ + rootPath);
rename(getPreviousTmp(), getPreviousDir());
return;
case RECOVER_UPGRADE: // mv previous.tmp -> current
LOG.info("Recovering storage directory " + rootPath
- + " from previous upgrade.");
+ + " from previous upgrade");
if (curDir.exists())
deleteDir(curDir);
rename(getPreviousTmp(), curDir);
return;
case COMPLETE_ROLLBACK: // rm removed.tmp
LOG.info("Completing previous rollback for storage directory "
- + rootPath + ".");
+ + rootPath);
deleteDir(getRemovedTmp());
return;
case RECOVER_ROLLBACK: // mv removed.tmp -> current
LOG.info("Recovering storage directory " + rootPath
- + " from previous rollback.");
+ + " from previous rollback");
rename(getRemovedTmp(), curDir);
return;
case COMPLETE_FINALIZE: // rm finalized.tmp
LOG.info("Completing previous finalize for storage directory "
- + rootPath + ".");
+ + rootPath);
deleteDir(getFinalizedTmp());
return;
case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
LOG.info("Completing previous checkpoint for storage directory "
- + rootPath + ".");
+ + rootPath);
File prevCkptDir = getPreviousCheckpoint();
if (prevCkptDir.exists())
deleteDir(prevCkptDir);
@@ -574,7 +574,7 @@ public abstract class Storage extends St
return;
case RECOVER_CHECKPOINT: // mv lastcheckpoint.tmp -> current
LOG.info("Recovering storage directory " + rootPath
- + " from failed checkpoint.");
+ + " from failed checkpoint");
if (curDir.exists())
deleteDir(curDir);
rename(getLastCheckpointTmp(), curDir);
@@ -629,7 +629,7 @@ public abstract class Storage extends St
FileLock newLock = tryLock();
if (newLock == null) {
String msg = "Cannot lock storage " + this.root
- + ". The directory is already locked.";
+ + ". The directory is already locked";
LOG.info(msg);
throw new IOException(msg);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Thu Nov 8 19:09:46 2012
@@ -75,14 +75,18 @@ class BPServiceActor implements Runnable
BPOfferService bpos;
- long lastBlockReport = 0;
- long lastDeletedReport = 0;
+ // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+ // by testing threads (through BPServiceActor#triggerXXX), while also
+ // assigned/read by the actor thread. Thus they should be declared volatile
+ // to ensure "happens-before" consistency.
+ volatile long lastBlockReport = 0;
+ volatile long lastDeletedReport = 0;
boolean resetBlockReportTime = true;
Thread bpThread;
DatanodeProtocolClientSideTranslatorPB bpNamenode;
- private long lastHeartbeat = 0;
+ private volatile long lastHeartbeat = 0;
private volatile boolean initialized = false;
/**
@@ -637,8 +641,7 @@ class BPServiceActor implements Runnable
try {
Thread.sleep(millis);
} catch (InterruptedException ie) {
- LOG.info("BPOfferService " + this +
- " interrupted while " + stateString);
+ LOG.info("BPOfferService " + this + " interrupted while " + stateString);
}
}
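The comment block added above lastBlockReport is the whole rationale: these fields are written by test threads (through the BPServiceActor trigger helpers the comment mentions) and read in the actor thread's loop, and without volatile the Java memory model gives no guarantee the actor ever observes the test thread's write. A minimal reproduction of the pattern:

    class VolatileTriggerSketch {
        // Without volatile, the actor loop may cache a stale value and never
        // notice the trigger; volatile establishes happens-before between
        // the writer and the reader.
        private volatile long lastBlockReport = System.currentTimeMillis();

        void actorLoop(long intervalMs) {
            while (true) {
                if (System.currentTimeMillis() - lastBlockReport >= intervalMs) {
                    sendBlockReport();       // reader: actor thread
                    lastBlockReport = System.currentTimeMillis();
                }
            }
        }

        void triggerBlockReport() {
            lastBlockReport = 0;             // writer: test thread forces a report
        }

        void sendBlockReport() { /* stand-in for the real report */ }
    }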
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java Thu Nov 8 19:09:46 2012
@@ -106,15 +106,15 @@ class BlockPoolManager {
}
}
- void shutDownAll() throws InterruptedException {
- BPOfferService[] bposArray = this.getAllNamenodeThreads();
-
- for (BPOfferService bpos : bposArray) {
- bpos.stop(); //interrupts the threads
- }
- //now join
- for (BPOfferService bpos : bposArray) {
- bpos.join();
+ void shutDownAll(BPOfferService[] bposArray) throws InterruptedException {
+ if (bposArray != null) {
+ for (BPOfferService bpos : bposArray) {
+ bpos.stop(); //interrupts the threads
+ }
+ //now join
+ for (BPOfferService bpos : bposArray) {
+ bpos.join();
+ }
}
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Thu Nov 8 19:09:46 2012
@@ -154,7 +154,7 @@ class BlockPoolSliceScanner {
}
this.scanPeriod = hours * 3600 * 1000;
LOG.info("Periodic Block Verification Scanner initialized with interval "
- + hours + " hours for block pool " + bpid + ".");
+ + hours + " hours for block pool " + bpid);
// get the list of blocks and arrange them in random order
List<Block> arr = dataset.getFinalizedBlocks(blockPoolId);
@@ -310,12 +310,12 @@ class BlockPoolSliceScanner {
}
private void handleScanFailure(ExtendedBlock block) {
- LOG.info("Reporting bad block " + block);
+ LOG.info("Reporting bad " + block);
try {
datanode.reportBadBlocks(block);
} catch (IOException ie) {
// it is bad, but not bad enough to shutdown the scanner
- LOG.warn("Cannot report bad block=" + block.getBlockId());
+ LOG.warn("Cannot report bad " + block.getBlockId());
}
}
@@ -411,7 +411,7 @@ class BlockPoolSliceScanner {
// If the block does not exist anymore, then it's not an error
if (!dataset.contains(block)) {
- LOG.info(block + " is no longer in the dataset.");
+ LOG.info(block + " is no longer in the dataset");
deleteBlock(block.getLocalBlock());
return;
}
@@ -424,7 +424,7 @@ class BlockPoolSliceScanner {
// is a block really deleted by mistake, DirectoryScan should catch it.
if (e instanceof FileNotFoundException ) {
LOG.info("Verification failed for " + block +
- ". It may be due to race with write.");
+ " - may be due to race with write");
deleteBlock(block.getLocalBlock());
return;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Thu Nov 8 19:09:46 2012
@@ -332,7 +332,7 @@ public class BlockPoolSliceStorage exten
// 4. rename <SD>/current/<bpid>/previous.tmp to <SD>/current/<bpid>/previous
rename(bpTmpDir, bpPrevDir);
LOG.info("Upgrade of block pool " + blockpoolID + " at " + bpSd.getRoot()
- + " is complete.");
+ + " is complete");
}
/**
@@ -409,7 +409,7 @@ public class BlockPoolSliceStorage exten
// 3. delete removed.tmp dir
deleteDir(tmpDir);
- LOG.info("Rollback of " + bpSd.getRoot() + " is complete.");
+ LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
}
/*
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Thu Nov 8 19:09:46 2012
@@ -319,9 +319,6 @@ class BlockReceiver implements Closeable
* @throws IOException
*/
void flushOrSync(boolean isSync) throws IOException {
- if (isSync && (out != null || checksumOut != null)) {
- datanode.metrics.incrFsyncCount();
- }
long flushTotalNanos = 0;
if (checksumOut != null) {
long flushStartNanos = System.nanoTime();
@@ -347,6 +344,9 @@ class BlockReceiver implements Closeable
}
if (checksumOut != null || out != null) {
datanode.metrics.addFlushNanos(flushTotalNanos);
+ if (isSync) {
+ datanode.metrics.incrFsyncCount();
+ }
}
}
@@ -357,7 +357,7 @@ class BlockReceiver implements Closeable
private void handleMirrorOutError(IOException ioe) throws IOException {
String bpid = block.getBlockPoolId();
LOG.info(datanode.getDNRegistrationForBP(bpid)
- + ":Exception writing block " + block + " to mirror " + mirrorAddr, ioe);
+ + ":Exception writing " + block + " to mirror " + mirrorAddr, ioe);
if (Thread.interrupted()) { // shut down if the thread is interrupted
throw ioe;
} else { // encounter an error while writing to mirror
@@ -379,16 +379,16 @@ class BlockReceiver implements Closeable
LOG.warn("Checksum error in block " + block + " from " + inAddr, ce);
if (srcDataNode != null) {
try {
- LOG.info("report corrupt block " + block + " from datanode " +
+ LOG.info("report corrupt " + block + " from datanode " +
srcDataNode + " to namenode");
datanode.reportRemoteBadBlock(srcDataNode, block);
} catch (IOException e) {
- LOG.warn("Failed to report bad block " + block +
+ LOG.warn("Failed to report bad " + block +
" from datanode " + srcDataNode + " to namenode");
}
}
- throw new IOException("Unexpected checksum mismatch " +
- "while writing " + block + " from " + inAddr);
+ throw new IOException("Unexpected checksum mismatch while writing "
+ + block + " from " + inAddr);
}
}
@@ -438,8 +438,10 @@ class BlockReceiver implements Closeable
int len = header.getDataLen();
boolean syncBlock = header.getSyncBlock();
- // make sure the block gets sync'ed upon close
- this.syncOnClose |= syncBlock && lastPacketInBlock;
+ // avoid double sync'ing on close
+ if (syncBlock && lastPacketInBlock) {
+ this.syncOnClose = false;
+ }
// update received bytes
long firstByteInBlock = offsetInBlock;
@@ -448,11 +450,11 @@ class BlockReceiver implements Closeable
replicaInfo.setNumBytes(offsetInBlock);
}
- // put in queue for pending acks
- if (responder != null) {
- ((PacketResponder)responder.getRunnable()).enqueue(seqno,
- lastPacketInBlock, offsetInBlock);
- }
+ // put in queue for pending acks, unless sync was requested
+ if (responder != null && !syncBlock) {
+ ((PacketResponder) responder.getRunnable()).enqueue(seqno,
+ lastPacketInBlock, offsetInBlock);
+ }
//First write the packet to the mirror:
if (mirrorOut != null && !mirrorError) {
@@ -471,8 +473,8 @@ class BlockReceiver implements Closeable
if(LOG.isDebugEnabled()) {
LOG.debug("Receiving an empty packet or the end of the block " + block);
}
- // flush unless close() would flush anyway
- if (syncBlock && !lastPacketInBlock) {
+ // sync block if requested
+ if (syncBlock) {
flushOrSync(true);
}
} else {
@@ -518,7 +520,7 @@ class BlockReceiver implements Closeable
// If this is a partial chunk, then read in pre-existing checksum
if (firstByteInBlock % bytesPerChecksum != 0) {
LOG.info("Packet starts at " + firstByteInBlock +
- " for block " + block +
+ " for " + block +
" which is not a multiple of bytesPerChecksum " +
bytesPerChecksum);
long offsetInChecksum = BlockMetadataHeader.getHeaderSize() +
@@ -563,8 +565,8 @@ class BlockReceiver implements Closeable
checksumBuf.arrayOffset() + checksumBuf.position(),
checksumLen);
}
- /// flush entire packet, sync unless close() will sync
- flushOrSync(syncBlock && !lastPacketInBlock);
+ /// flush entire packet, sync if requested
+ flushOrSync(syncBlock);
replicaInfo.setLastChecksumAndDataLen(
offsetInBlock, lastChunkChecksum
@@ -580,6 +582,13 @@ class BlockReceiver implements Closeable
}
}
+ // if sync was requested, put in queue for pending acks here
+ // (after the fsync finished)
+ if (responder != null && syncBlock) {
+ ((PacketResponder) responder.getRunnable()).enqueue(seqno,
+ lastPacketInBlock, offsetInBlock);
+ }
+
if (throttler != null) { // throttle I/O
throttler.throttle(len);
}
@@ -662,7 +671,7 @@ class BlockReceiver implements Closeable
}
} catch (IOException ioe) {
- LOG.info("Exception in receiveBlock for " + block, ioe);
+ LOG.info("Exception for " + block, ioe);
throw ioe;
} finally {
if (!responderClosed) { // Abnormal termination of the flow above
@@ -733,10 +742,9 @@ class BlockReceiver implements Closeable
int checksumSize = diskChecksum.getChecksumSize();
blkoff = blkoff - sizePartialChunk;
LOG.info("computePartialChunkCrc sizePartialChunk " +
- sizePartialChunk +
- " block " + block +
- " offset in block " + blkoff +
- " offset in metafile " + ckoff);
+ sizePartialChunk + " " + block +
+ " block offset " + blkoff +
+ " metafile offset " + ckoff);
// create an input stream from the block file
// and read in partial crc chunk into temporary buffer
@@ -758,7 +766,7 @@ class BlockReceiver implements Closeable
partialCrc = DataChecksum.newDataChecksum(
diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
partialCrc.update(buf, 0, sizePartialChunk);
- LOG.info("Read in partial CRC chunk from disk for block " + block);
+ LOG.info("Read in partial CRC chunk from disk for " + block);
// paranoia! verify that the pre-computed crc matches what we
// recalculated just now
@@ -973,7 +981,7 @@ class BlockReceiver implements Closeable
"HDFS_WRITE", clientname, offset,
dnR.getStorageID(), block, endTime-startTime));
} else {
- LOG.info("Received block " + block + " of size "
+ LOG.info("Received " + block + " size "
+ block.getNumBytes() + " from " + inAddr);
}
}
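Several of the hunks above implement a single behavioral change in BlockReceiver: when a packet carries the syncBlock flag, its ack is no longer queued to the responder before the write, but only after flushOrSync(true) has returned, so a client that asked for a sync never receives an ack for data that is not yet durable (and the fsync counter is now bumped only when a flush actually ran). The reordering, reduced to a sketch with a stand-in responder queue:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class SyncAckOrderingSketch {
        private final BlockingQueue<Long> ackQueue = new LinkedBlockingQueue<Long>();

        void receivePacket(long seqno, boolean syncBlock) throws InterruptedException {
            if (!syncBlock) {
                ackQueue.put(seqno);       // ack may be sent before data hits disk
                writePacket(false);
            } else {
                writePacket(true);         // write and fsync first...
                ackQueue.put(seqno);       // ...then ack, once the data is durable
            }
        }

        private void writePacket(boolean sync) {
            // stand-in for writing the packet body and calling flushOrSync(sync)
        }
    }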
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Thu Nov 8 19:09:46 2012
@@ -503,7 +503,7 @@ class BlockSender implements java.io.Clo
* part of a block and then decides not to read the rest (but leaves
* the socket open).
*/
- LOG.info("BlockSender.sendChunks() exception: ", e);
+ LOG.info("exception: ", e);
} else {
/* Exception while writing to the client. Connection closure from
* the other end is mostly the case and we do not care much about
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu Nov 8 19:09:46 2012
@@ -481,8 +481,7 @@ public class DataNode extends Configured
blockScanner = new DataBlockScanner(this, data, conf);
blockScanner.start();
} else {
- LOG.info("Periodic Block Verification scan is disabled because " +
- reason + ".");
+ LOG.info("Periodic Block Verification scan disabled because " + reason);
}
}
@@ -511,7 +510,7 @@ public class DataNode extends Configured
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " +
- reason + ".");
+ reason);
}
}
@@ -1095,6 +1094,12 @@ public class DataNode extends Configured
}
}
+ // We need to make a copy of the original blockPoolManager#offerServices to
+ // make sure blockPoolManager#shutDownAll() can still access all the
+ // BPOfferServices, since after setting DataNode#shouldRun to false the
+ // offerServices may be modified.
+ BPOfferService[] bposArray = this.blockPoolManager == null ? null
+ : this.blockPoolManager.getAllNamenodeThreads();
this.shouldRun = false;
shutdownPeriodicScanners();
@@ -1141,7 +1146,7 @@ public class DataNode extends Configured
if(blockPoolManager != null) {
try {
- this.blockPoolManager.shutDownAll();
+ this.blockPoolManager.shutDownAll(bposArray);
} catch (InterruptedException ie) {
LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
}
@@ -1256,7 +1261,7 @@ public class DataNode extends Configured
xfersBuilder.append(xferTargets[i]);
xfersBuilder.append(" ");
}
- LOG.info(bpReg + " Starting thread to transfer block " +
+ LOG.info(bpReg + " Starting thread to transfer " +
block + " to " + xfersBuilder);
}
@@ -2043,7 +2048,7 @@ public class DataNode extends Configured
ExtendedBlock block = rb.getBlock();
DatanodeInfo[] targets = rb.getLocations();
- LOG.info(who + " calls recoverBlock(block=" + block
+ LOG.info(who + " calls recoverBlock(" + block
+ ", targets=[" + Joiner.on(", ").join(targets) + "]"
+ ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
}
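This file and the BlockPoolManager change above close a shutdown race as a pair: the BPOfferService array is snapshotted before shouldRun is set to false, and shutDownAll now receives that snapshot instead of re-reading the live collection, which may be modified once the flag flips. The idea in isolation:

    import java.util.ArrayList;
    import java.util.List;

    class ShutdownSnapshotSketch {
        private volatile boolean shouldRun = true;
        private final List<Thread> offerServices = new ArrayList<Thread>();

        void shutdown() throws InterruptedException {
            // Snapshot first: once shouldRun is false, offerServices may be
            // mutated concurrently and threads could be missed on join.
            Thread[] snapshot = offerServices.toArray(new Thread[0]);
            shouldRun = false;
            for (Thread t : snapshot) {
                t.interrupt();             // stop
            }
            for (Thread t : snapshot) {
                t.join();                  // then join every snapshotted thread
            }
        }
    }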
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Thu Nov 8 19:09:46 2012
@@ -155,11 +155,11 @@ public class DataStorage extends Storage
break;
case NON_EXISTENT:
// ignore this storage
- LOG.info("Storage directory " + dataDir + " does not exist.");
+ LOG.info("Storage directory " + dataDir + " does not exist");
it.remove();
continue;
case NOT_FORMATTED: // format
- LOG.info("Storage directory " + dataDir + " is not formatted.");
+ LOG.info("Storage directory " + dataDir + " is not formatted");
LOG.info("Formatting ...");
format(sd, nsInfo);
break;
@@ -482,7 +482,7 @@ public class DataStorage extends Storage
// 5. Rename <SD>/previous.tmp to <SD>/previous
rename(tmpDir, prevDir);
- LOG.info("Upgrade of " + sd.getRoot()+ " is complete.");
+ LOG.info("Upgrade of " + sd.getRoot()+ " is complete");
addBlockPoolStorage(nsInfo.getBlockPoolID(), bpStorage);
}
@@ -556,7 +556,7 @@ public class DataStorage extends Storage
rename(prevDir, curDir);
// delete tmp dir
deleteDir(tmpDir);
- LOG.info("Rollback of " + sd.getRoot() + " is complete.");
+ LOG.info("Rollback of " + sd.getRoot() + " is complete");
}
/**
@@ -596,9 +596,9 @@ public class DataStorage extends Storage
deleteDir(bbwDir);
}
} catch(IOException ex) {
- LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
+ LOG.error("Finalize upgrade for " + dataDirPath + " failed", ex);
}
- LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
+ LOG.info("Finalize upgrade for " + dataDirPath + " is complete");
}
@Override
public String toString() { return "Finalize " + dataDirPath; }
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Thu Nov 8 19:09:46 2012
@@ -170,7 +170,7 @@ class DataXceiver extends Receiver imple
} catch (InvalidMagicNumberException imne) {
LOG.info("Failed to read expected encryption handshake from client " +
"at " + s.getInetAddress() + ". Perhaps the client is running an " +
- "older version of Hadoop which does not support encryption.");
+ "older version of Hadoop which does not support encryption");
return;
}
input = encryptedStreams.in;
@@ -367,9 +367,8 @@ class DataXceiver extends Receiver imple
// make a copy here.
final ExtendedBlock originalBlock = new ExtendedBlock(block);
block.setNumBytes(dataXceiverServer.estimateBlockSize);
- LOG.info("Receiving block " + block +
- " src: " + remoteAddress +
- " dest: " + localAddress);
+ LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
+ + localAddress);
// reply to upstream datanode or client
final DataOutputStream replyOut = new DataOutputStream(
@@ -478,9 +477,9 @@ class DataXceiver extends Receiver imple
block + " to mirror " + mirrorNode + ": " + e);
throw e;
} else {
- LOG.info(datanode + ":Exception transfering block " +
+ LOG.info(datanode + ":Exception transferring " +
block + " to mirror " + mirrorNode +
- ". continuing without the mirror.", e);
+ "- continuing without the mirror", e);
}
}
}
@@ -528,10 +527,8 @@ class DataXceiver extends Receiver imple
if (isDatanode ||
stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT);
- LOG.info("Received block " + block +
- " src: " + remoteAddress +
- " dest: " + localAddress +
- " of size " + block.getNumBytes());
+ LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
+ + localAddress + " of size " + block.getNumBytes());
}
@@ -674,7 +671,7 @@ class DataXceiver extends Receiver imple
datanode.metrics.incrBytesRead((int) read);
datanode.metrics.incrBlocksRead();
- LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
+ LOG.info("Copied " + block + " to " + s.getRemoteSocketAddress());
} catch (IOException ioe) {
isOpSuccess = false;
LOG.info("opCopyBlock " + block + " received exception " + ioe);
@@ -797,8 +794,7 @@ class DataXceiver extends Receiver imple
// notify name node
datanode.notifyNamenodeReceivedBlock(block, delHint);
- LOG.info("Moved block " + block +
- " from " + s.getRemoteSocketAddress());
+ LOG.info("Moved " + block + " from " + s.getRemoteSocketAddress());
} catch (IOException ioe) {
opStatus = ERROR;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java Thu Nov 8 19:09:46 2012
@@ -38,6 +38,8 @@ import org.mortbay.jetty.security.SslSoc
import javax.net.ssl.SSLServerSocketFactory;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* Utility class to start a datanode in a secure cluster, first obtaining
* privileged resources before main startup and handing them to the datanode.
@@ -73,6 +75,25 @@ public class SecureDataNodeStarter imple
// Stash command-line arguments for regular datanode
args = context.getArguments();
+ sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+ resources = getSecureResources(sslFactory, conf);
+ }
+
+ @Override
+ public void start() throws Exception {
+ System.err.println("Starting regular datanode initialization");
+ DataNode.secureMain(args, resources);
+ }
+
+ @Override public void destroy() {
+ sslFactory.destroy();
+ }
+
+ @Override public void stop() throws Exception { /* Nothing to do */ }
+
+ @VisibleForTesting
+ public static SecureResources getSecureResources(final SSLFactory sslFactory,
+ Configuration conf) throws Exception {
// Obtain secure port for data streaming to datanode
InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
@@ -85,13 +106,12 @@ public class SecureDataNodeStarter imple
// Check that we got the port we need
if (ss.getLocalPort() != streamingAddr.getPort()) {
throw new RuntimeException("Unable to bind on specified streaming port in secure " +
- "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
+ "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
}
// Obtain secure listener for web server
Connector listener;
if (HttpConfig.isSecure()) {
- sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
try {
sslFactory.init();
} catch (GeneralSecurityException ex) {
@@ -126,18 +146,7 @@ public class SecureDataNodeStarter imple
}
System.err.println("Opened streaming server at " + streamingAddr);
System.err.println("Opened info server at " + infoSocAddr);
- resources = new SecureResources(ss, listener);
+ return new SecureResources(ss, listener);
}
- @Override
- public void start() throws Exception {
- System.err.println("Starting regular datanode initialization");
- DataNode.secureMain(args, resources);
- }
-
- @Override public void destroy() {
- sslFactory.destroy();
- }
-
- @Override public void stop() throws Exception { /* Nothing to do */ }
}
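The refactoring above pulls the privileged socket and listener acquisition out of
Commons Daemon's init() into a static, @VisibleForTesting getSecureResources() method.
A hypothetical test-side fragment, assuming a Configuration already pointed at free
test ports (the surrounding test scaffolding is not part of this commit):

    // Hypothetical sketch: acquire the privileged resources directly,
    // without driving the Commons Daemon init()/start() lifecycle.
    Configuration conf = new Configuration();
    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
    SecureResources resources =
        SecureDataNodeStarter.getSecureResources(sslFactory, conf);
    // resources can then be handed to DataNode.secureMain(args, resources)
    // or to an in-process datanode under test.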
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java Thu Nov 8 19:09:46 2012
@@ -136,7 +136,7 @@ class FsDatasetAsyncDiskService {
if (executors == null) {
LOG.warn("AsyncDiskService has already shut down.");
} else {
- LOG.info("Shutting down all async disk service threads...");
+ LOG.info("Shutting down all async disk service threads");
for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
e.getValue().shutdown();
@@ -144,7 +144,7 @@ class FsDatasetAsyncDiskService {
// clear the executor map so that calling execute again will fail.
executors = null;
- LOG.info("All async disk service threads have been shut down.");
+ LOG.info("All async disk service threads have been shut down");
}
}
@@ -154,7 +154,7 @@ class FsDatasetAsyncDiskService {
*/
void deleteAsync(FsVolumeImpl volume, File blockFile, File metaFile,
ExtendedBlock block) {
- LOG.info("Scheduling block " + block.getLocalBlock()
+ LOG.info("Scheduling " + block.getLocalBlock()
+ " file " + blockFile + " for deletion");
ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
volume, blockFile, metaFile, block);
@@ -198,8 +198,8 @@ class FsDatasetAsyncDiskService {
datanode.notifyNamenodeDeletedBlock(block);
}
volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
- LOG.info("Deleted block " + block.getBlockPoolId() + " "
- + block.getLocalBlock() + " at file " + blockFile);
+ LOG.info("Deleted " + block.getBlockPoolId() + " "
+ + block.getLocalBlock() + " file " + blockFile);
}
}
}
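For context on the class these hunks touch: FsDatasetAsyncDiskService keeps one thread
pool per volume and pushes replica-file deletions onto the pool of the owning volume,
so a slow or failing disk only backs up its own queue. A simplified sketch of that
one-executor-per-volume shape (names and the bare Runnable are illustrative; the real
class uses bounded ThreadPoolExecutors and a richer ReplicaFileDeleteTask):

    import java.io.File;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class AsyncDeleter {
        private final Map<File, ExecutorService> executors =
            new HashMap<File, ExecutorService>();

        synchronized void addVolume(File volumeRoot) {
            // One executor per volume: deletions on different disks
            // proceed independently of each other.
            executors.put(volumeRoot, Executors.newSingleThreadExecutor());
        }

        synchronized void deleteAsync(File volumeRoot, final File blockFile) {
            executors.get(volumeRoot).execute(new Runnable() {
                public void run() {
                    if (!blockFile.delete()) {
                        System.err.println("Unexpected error deleting "
                            + blockFile);
                    }
                }
            });
        }
    }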
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Thu Nov 8 19:09:46 2012
@@ -425,7 +425,7 @@ class FsDatasetImpl implements FsDataset
return;
}
if (newlen > oldlen) {
- throw new IOException("Cannout truncate block to from oldlen (=" + oldlen
+ throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
+ ") to newlen (=" + newlen + ")");
}
@@ -481,7 +481,7 @@ class FsDatasetImpl implements FsDataset
" should be greater than the replica " + b + "'s generation stamp");
}
ReplicaInfo replicaInfo = getReplicaInfo(b);
- LOG.info("Appending to replica " + replicaInfo);
+ LOG.info("Appending to " + replicaInfo);
if (replicaInfo.getState() != ReplicaState.FINALIZED) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
@@ -689,7 +689,7 @@ class FsDatasetImpl implements FsDataset
public synchronized ReplicaInPipeline recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
- LOG.info("Recover the RBW replica " + b);
+ LOG.info("Recover RBW replica " + b);
ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
@@ -700,7 +700,7 @@ class FsDatasetImpl implements FsDataset
}
ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
- LOG.info("Recovering replica " + rbw);
+ LOG.info("Recovering " + rbw);
// Stop the previous writer
rbw.stopWriter();
@@ -736,8 +736,8 @@ class FsDatasetImpl implements FsDataset
final long blockId = b.getBlockId();
final long expectedGs = b.getGenerationStamp();
final long visible = b.getNumBytes();
- LOG.info("Convert replica " + b
- + " from Temporary to RBW, visible length=" + visible);
+ LOG.info("Convert " + b + " from Temporary to RBW, visible length="
+ + visible);
final ReplicaInPipeline temp;
{
@@ -1415,8 +1415,7 @@ class FsDatasetImpl implements FsDataset
static ReplicaRecoveryInfo initReplicaRecovery(String bpid,
ReplicaMap map, Block block, long recoveryId) throws IOException {
final ReplicaInfo replica = map.get(bpid, block.getBlockId());
- LOG.info("initReplicaRecovery: block=" + block
- + ", recoveryId=" + recoveryId
+ LOG.info("initReplicaRecovery: " + block + ", recoveryId=" + recoveryId
+ ", replica=" + replica);
//check replica
@@ -1485,7 +1484,7 @@ class FsDatasetImpl implements FsDataset
//get replica
final String bpid = oldBlock.getBlockPoolId();
final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
- LOG.info("updateReplica: block=" + oldBlock
+ LOG.info("updateReplica: " + oldBlock
+ ", recoveryId=" + recoveryId
+ ", length=" + newlength
+ ", replica=" + replica);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1407217&r1=1407216&r2=1407217&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Thu Nov 8 19:09:46 2012
@@ -78,10 +78,6 @@ public class BackupNode extends NameNode
String nnHttpAddress;
/** Checkpoint manager */
Checkpointer checkpointManager;
- /** ClusterID to which BackupNode belongs to */
- String clusterId;
- /** Block pool Id of the peer namenode of this BackupNode */
- String blockPoolId;
BackupNode(Configuration conf, NamenodeRole role) throws IOException {
super(conf, role);
@@ -145,6 +141,7 @@ public class BackupNode extends NameNode
CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
NamespaceInfo nsInfo = handshake(conf);
super.initialize(conf);
+ namesystem.setBlockPoolId(nsInfo.getBlockPoolID());
if (false == namesystem.isInSafeMode()) {
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -154,9 +151,6 @@ public class BackupNode extends NameNode
// therefore lease hard limit should never expire.
namesystem.leaseManager.setLeasePeriod(
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
-
- clusterId = nsInfo.getClusterID();
- blockPoolId = nsInfo.getBlockPoolID();
// register with the active name-node
registerWith(nsInfo);
@@ -219,7 +213,7 @@ public class BackupNode extends NameNode
}
/* @Override */// NameNode
- public boolean setSafeMode(@SuppressWarnings("unused") SafeModeAction action)
+ public boolean setSafeMode(SafeModeAction action)
throws IOException {
throw new UnsupportedActionException("setSafeMode");
}
@@ -415,14 +409,6 @@ public class BackupNode extends NameNode
return nsInfo;
}
- String getBlockPoolId() {
- return blockPoolId;
- }
-
- String getClusterId() {
- return clusterId;
- }
-
@Override
protected NameNodeHAContext createHAContext() {
return new BNHAContext();