Posted to hdfs-dev@hadoop.apache.org by Apache Hudson Server <hu...@hudson.apache.org> on 2010/12/03 17:00:02 UTC

Hadoop-Hdfs-trunk - Build # 509 - Still Failing

See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/509/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 859145 lines...]
    [junit] 2010-12-03 16:02:11,032 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-03 16:02:11,033 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-03 16:02:11,033 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(770)) - Shutting down DataNode 0
    [junit] 2010-12-03 16:02:11,135 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 55411
    [junit] 2010-12-03 16:02:11,135 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 55411: exiting
    [junit] 2010-12-03 16:02:11,135 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 55411
    [junit] 2010-12-03 16:02:11,136 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-03 16:02:11,136 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:43279, storageID=DS-78845373-127.0.1.1-43279-1291392120232, infoPort=59503, ipcPort=55411):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-03 16:02:11,136 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-03 16:02:11,237 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-03 16:02:11,237 INFO  datanode.DataNode (DataNode.java:run(1442)) - DatanodeRegistration(127.0.0.1:43279, storageID=DS-78845373-127.0.1.1-43279-1291392120232, infoPort=59503, ipcPort=55411):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-03 16:02:11,237 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 55411
    [junit] 2010-12-03 16:02:11,238 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-03 16:02:11,238 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-03 16:02:11,238 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-03 16:02:11,239 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-03 16:02:11,341 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-03 16:02:11,341 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-03 16:02:11,341 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 2 
    [junit] 2010-12-03 16:02:11,343 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 53180
    [junit] 2010-12-03 16:02:11,343 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 53180: exiting
    [junit] 2010-12-03 16:02:11,343 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53180
    [junit] 2010-12-03 16:02:11,343 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 53180: exiting
    [junit] 2010-12-03 16:02:11,344 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 53180: exiting
    [junit] 2010-12-03 16:02:11,344 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 53180: exiting
    [junit] 2010-12-03 16:02:11,344 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-03 16:02:11,345 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 53180: exiting
    [junit] 2010-12-03 16:02:11,345 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 53180: exiting
    [junit] 2010-12-03 16:02:11,345 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 53180: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.159 sec
    [junit] 2010-12-03 16:02:11,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 53180: exiting
    [junit] 2010-12-03 16:02:11,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 53180: exiting
    [junit] 2010-12-03 16:02:11,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 53180: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 266 minutes 45 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
15 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1332)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1350)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
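
The "directory is already locked" failures above are the signature of a NameNode storage directory whose lock is still held, typically by a MiniDFSCluster that an earlier test failed to shut down. Storage$StorageDirectory.lock() takes an exclusive java.nio file lock on an in_use.lock file inside the directory. A minimal sketch of that locking pattern (the file name matches Hadoop's convention, but the helper itself is illustrative, not the Hadoop code):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    // Sketch of exclusive directory locking via a lock file, the same
    // java.nio mechanism behind Storage$StorageDirectory.lock().
    public class DirLock {
        public static FileLock lockDir(File dir) throws IOException {
            RandomAccessFile raf =
                new RandomAccessFile(new File(dir, "in_use.lock"), "rws");
            FileLock lock = raf.getChannel().tryLock(); // null if another process holds it
            if (lock == null) {
                raf.close();
                throw new IOException("Cannot lock storage " + dir
                    + ". The directory is already locked.");
            }
            return lock; // caller must release() and close on shutdown
        }
    }

For reference, FileChannel.tryLock() returns null when another process holds the lock and throws OverlappingFileLockException when the holder is in the same JVM; once one test leaks a cluster, every later test formatting the same name directory fails at setUp like this.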


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1332)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1350)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.io.FileInputStream.readBytes(Native Method)
	at java.io.FileInputStream.read(FileInputStream.java:199)
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
	at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:218)
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:258)
	at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
	at sun.security.provider.SeedGenerator$URLSeedGenerator.getSeedByte(SeedGenerator.java:453)
	at sun.security.provider.SeedGenerator.getSeedBytes(SeedGenerator.java:123)
	at sun.security.provider.SeedGenerator.generateSeed(SeedGenerator.java:118)
	at sun.security.provider.SecureRandom.engineGenerateSeed(SecureRandom.java:114)
	at sun.security.provider.SecureRandom.engineNextBytes(SecureRandom.java:171)
	at java.security.SecureRandom.nextBytes(SecureRandom.java:433)
	at java.security.SecureRandom.next(SecureRandom.java:455)
	at java.util.Random.nextLong(Random.java:284)
	at org.mortbay.jetty.servlet.HashSessionIdManager.doStart(HashSessionIdManager.java:139)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.servlet.AbstractSessionManager.doStart(AbstractSessionManager.java:168)
	at org.mortbay.jetty.servlet.HashSessionManager.doStart(HashSessionManager.java:67)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.servlet.SessionHandler.doStart(SessionHandler.java:115)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
	at org.mortbay.jetty.handler.ContextHandler.startContext(ContextHandler.java:537)
	at org.mortbay.jetty.servlet.Context.startContext(Context.java:136)
	at org.mortbay.jetty.webapp.WebAppContext.startContext(WebAppContext.java:1234)
	at org.mortbay.jetty.handler.ContextHandler.doStart(ContextHandler.java:517)
	at org.mortbay.jetty.webapp.WebAppContext.doStart(WebAppContext.java:460)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerCollection.doStart(HandlerCollection.java:152)
	at org.mortbay.jetty.handler.ContextHandlerCollection.doStart(ContextHandlerCollection.java:156)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
	at org.mortbay.jetty.Server.doStart(Server.java:222)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.apache.hadoop.http.HttpServer.start(HttpServer.java:618)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:516)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:461)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:461)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.activate(NameNode.java:405)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:389)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:578)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:571)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1534)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:445)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_21z1ppcxud(TestFileAppend4.java:151)
	at org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock(TestFileAppend4.java:150)
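
The timeout here is not in HDFS code at all: the trace bottoms out in sun.security.provider.SeedGenerator, i.e. Jetty's session manager seeding a SecureRandom while the NameNode's HTTP server starts, and seed generation reads the OS blocking entropy source, which can stall for minutes on a busy, headless build slave. A tiny probe showing the mechanism (plain Java, nothing Hadoop-specific):

    import java.security.SecureRandom;

    // Measures the blocking seed generation visible in the stack trace.
    // generateSeed() goes through SeedGenerator and reads the OS entropy
    // source (/dev/random on Linux), which can stall when entropy is low.
    public class EntropyProbe {
        public static void main(String[] args) {
            long start = System.nanoTime();
            byte[] seed = new SecureRandom().generateSeed(20);
            long ms = (System.nanoTime() - start) / 1000000L;
            System.out.println("generateSeed of " + seed.length
                + " bytes took " + ms + " ms");
        }
    }

If the probe takes seconds rather than milliseconds, the usual remedy is running the JVM with -Djava.security.egd=file:/dev/./urandom so seeding uses the non-blocking device.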


FAILED:  org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1332)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1350)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_269ddf9xvh(TestFileAppend4.java:222)
	at org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile(TestFileAppend4.java:221)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1501)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1468)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
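
"Too many open files" at Selector.open() means the test JVM hit its file descriptor limit: every Selector costs a pipe (two descriptors) on top of the sockets and storage files each MiniDFSCluster opens, so clusters leaked by earlier failures eventually starve later ones. Besides raising ulimit -n on the slave, the defensive fix is a teardown that always releases the cluster, even when setUp or the test body aborts. A sketch in the JUnit 3 style these tests use (class and field names illustrative):

    import junit.framework.TestCase;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Base fixture that guarantees cluster shutdown, releasing selectors,
    // sockets, and storage directory locks even after a failed test.
    public abstract class ClusterTestBase extends TestCase {
        protected MiniDFSCluster cluster;

        protected void setUp() throws Exception {
            cluster = new MiniDFSCluster.Builder(new Configuration()).build();
            cluster.waitActive();
        }

        protected void tearDown() throws Exception {
            if (cluster != null) {
                cluster.shutdown();
                cluster = null;
            }
        }
    }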


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brsl(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rsu(TestBalancer.java:344)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr53(TestBlockTokenWithDFS.java:529)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c1c9322af2cd37560e0750eb9d266a40 but expecting 872232f29d4fb82a18c29774266db9e7

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c1c9322af2cd37560e0750eb9d266a40 but expecting 872232f29d4fb82a18c29774266db9e7
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1062)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tdc(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
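
Here the secondary NameNode refused to merge a downloaded fsimage whose computed MD5 did not match the digest it expected, which points at a truncated or concurrently-rewritten image in the checkpoint directory rather than at the checksum code itself. The check being tripped is ordinary digest verification; an illustrative equivalent (helper name and message text are hypothetical):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.math.BigInteger;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Hash a file and compare against the digest recorded when it was
    // written; a mismatch means the bytes changed between write and read.
    public class ImageChecksum {
        public static void verify(String path, String expectedHex)
                throws IOException, NoSuchAlgorithmException {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            InputStream in = new FileInputStream(path);
            try {
                byte[] buf = new byte[8192];
                for (int n; (n = in.read(buf)) != -1; ) {
                    md5.update(buf, 0, n);
                }
            } finally {
                in.close();
            }
            String actual = String.format("%032x", new BigInteger(1, md5.digest()));
            if (!actual.equals(expectedHex)) {
                throw new IOException("Image file " + path + " is corrupt with MD5 "
                    + actual + " but expecting " + expectedHex);
            }
        }
    }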


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2lttiju10wk(TestBlockRecovery.java:165)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas(TestBlockRecovery.java:153)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2c2lg1h10x2(TestBlockRecovery.java:204)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas(TestBlockRecovery.java:190)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_29tewcb10xl(TestBlockRecovery.java:243)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas(TestBlockRecovery.java:229)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2cqk51310y4(TestBlockRecovery.java:281)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas(TestBlockRecovery.java:269)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2396azp10yh(TestBlockRecovery.java:305)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas(TestBlockRecovery.java:293)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2ahdlbx10yt(TestBlockRecovery.java:329)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas(TestBlockRecovery.java:317)




Hadoop-Hdfs-trunk - Build # 642 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/642/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 730857 lines...]
    [junit] 2011-04-19 12:21:35,904 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:44943, storageID=DS-698805625-127.0.1.1-44943-1303215695276, infoPort=45861, ipcPort=41554):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-19 12:21:35,905 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41554
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-19 12:21:35,905 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-19 12:21:35,906 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-19 12:21:35,906 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 39285
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 39285: exiting
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 39285
    [junit] 2011-04-19 12:21:36,007 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-19 12:21:36,007 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-19 12:21:36,007 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:49559, storageID=DS-1905199131-127.0.1.1-49559-1303215695148, infoPort=49257, ipcPort=39285):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-19 12:21:36,009 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:49559, storageID=DS-1905199131-127.0.1.1-49559-1303215695148, infoPort=49257, ipcPort=39285):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-19 12:21:36,110 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 39285
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-19 12:21:36,110 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-19 12:21:36,111 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-19 12:21:36,212 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-19 12:21:36,212 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4 
    [junit] 2011-04-19 12:21:36,212 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2896)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-19 12:21:36,213 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55002
    [junit] 2011-04-19 12:21:36,214 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 55002: exiting
    [junit] 2011-04-19 12:21:36,214 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 55002
    [junit] 2011-04-19 12:21:36,214 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 97.315 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 49 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_17

Error Message:
Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:50271], original=[127.0.0.1:50271]

Stack Trace:
java.io.IOException: Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:50271], original=[127.0.0.1:50271]
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.findNewDatanode(DFSOutputStream.java:768)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.addDatanode2ExistingPipeline(DFSOutputStream.java:824)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:918)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:731)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:415)
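
This fault-injection test kills a datanode in the write pipeline, and the client then tries to add a replacement: "nodes.length != original.length + 1" says the NameNode had no spare datanode to offer, so the pipeline could not grow back, which is unsurprising on a cluster this small. The behavior is governed by the client-side replace-datanode-on-failure policy; a test that cannot supply spare nodes typically relaxes it. A sketch (the property key follows the dfs.client.block.write.replace-datanode-on-failure.* naming from this era of HDFS; treat the exact key and value as an assumption):

    import org.apache.hadoop.conf.Configuration;

    // Configuration for a tiny test cluster where no replacement datanode
    // can ever be found: disable datanode replacement on pipeline failure
    // so writes continue on the surviving nodes instead of failing.
    public class TinyClusterConf {
        public static Configuration create() {
            Configuration conf = new Configuration();
            conf.setBoolean(
                "dfs.client.block.write.replace-datanode-on-failure.enable",
                false); // assumed key; verify against hdfs-default.xml
            return conf;
        }
    }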


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182xp1(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)




Hadoop-Hdfs-trunk - Build # 641 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/641/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 711231 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-40822 / http-40823 / https-40824
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:40823
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.481 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.333 sec
   [cactus] Tomcat 5.x started on port [40823]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.329 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.318 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.859 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 62 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
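
Both failures say the authorization filter answered 200 where the test expected 403, i.e. hdfsproxy served a path that the permit list should have rejected. The contract under test is a standard servlet filter that either forwards an authorized request down the chain or short-circuits with SC_FORBIDDEN; a hypothetical minimal version (class name and permit list invented for illustration, not the hdfsproxy code):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import javax.servlet.Filter;
    import javax.servlet.FilterChain;
    import javax.servlet.FilterConfig;
    import javax.servlet.ServletException;
    import javax.servlet.ServletRequest;
    import javax.servlet.ServletResponse;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    // Forwards requests whose path starts with a permitted prefix and
    // rejects everything else with HTTP 403.
    public class PathAuthorizationFilter implements Filter {
        private List<String> permitted;

        public void init(FilterConfig config) {
            permitted = Arrays.asList("/data/user1"); // illustrative permit list
        }

        public void doFilter(ServletRequest req, ServletResponse resp,
                FilterChain chain) throws IOException, ServletException {
            String path = ((HttpServletRequest) req).getPathInfo();
            if (path != null) {
                for (String prefix : permitted) {
                    if (path.startsWith(prefix)) {
                        chain.doFilter(req, resp); // authorized: 200 path
                        return;
                    }
                }
            }
            ((HttpServletResponse) resp).sendError(HttpServletResponse.SC_FORBIDDEN);
        }

        public void destroy() {}
    }

A 200-instead-of-403 result means the request landed in the "authorized" branch, so the place to look is how the permit list gets built from the filter configuration, not the servlet plumbing.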




Hadoop-Hdfs-trunk - Build # 640 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/640/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 722627 lines...]
    [junit] 
    [junit] 2011-04-17 12:35:04,371 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-17 12:35:04,371 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-17 12:35:04,371 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:53934, storageID=DS-1753167764-127.0.1.1-53934-1303043703615, infoPort=45352, ipcPort=33069):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-17 12:35:04,372 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 33069
    [junit] 2011-04-17 12:35:04,372 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-17 12:35:04,372 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-17 12:35:04,372 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-17 12:35:04,372 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-17 12:35:04,373 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-17 12:35:04,473 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46160
    [junit] 2011-04-17 12:35:04,474 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 46160: exiting
    [junit] 2011-04-17 12:35:04,474 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 46160
    [junit] 2011-04-17 12:35:04,474 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-17 12:35:04,474 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:45883, storageID=DS-899432502-127.0.1.1-45883-1303043703453, infoPort=52177, ipcPort=46160):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-17 12:35:04,474 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-17 12:35:04,474 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:45883, storageID=DS-899432502-127.0.1.1-45883-1303043703453, infoPort=52177, ipcPort=46160):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-17 12:35:04,575 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46160
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-17 12:35:04,575 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-17 12:35:04,575 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-17 12:35:04,676 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-17 12:35:04,676 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2896)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-17 12:35:04,676 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 40108
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 40108: exiting
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 40108
    [junit] 2011-04-17 12:35:04,678 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 99.107 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 61 minutes 40 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_18

Error Message:
Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:54748], original=[127.0.0.1:54748]

Stack Trace:
java.io.IOException: Failed to add a datanode: nodes.length != original.length + 1, nodes=[127.0.0.1:54748], original=[127.0.0.1:54748]
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.findNewDatanode(DFSOutputStream.java:768)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.addDatanode2ExistingPipeline(DFSOutputStream.java:824)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.setupPipelineForAppendOrRecovery(DFSOutputStream.java:918)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.processDatanodeError(DFSOutputStream.java:731)
	at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:415)




Hadoop-Hdfs-trunk - Build # 639 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/639/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1819 lines...]
    [javac]                                              ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java:93: cannot find symbol
    [javac] symbol  : class TestCmd
    [javac] location: class org.apache.hadoop.cli.TestHDFSCLI
    [javac]   protected Result execute(TestCmd cmd) throws Exception {
    [javac]                            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/CmdFactoryDFS.java:32: cannot find symbol
    [javac] symbol  : variable DFSADMIN
    [javac] location: class org.apache.hadoop.cli.CmdFactoryDFS
    [javac]       case DFSADMIN:
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/CmdFactoryDFS.java:33: package CLICommands does not exist
    [javac]         executor = new CLICommands.FSCmdExecutor(tag, new DFSAdmin());
    [javac]                                   ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/cli/CmdFactoryDFS.java:36: cannot find symbol
    [javac] symbol  : variable CmdFactory
    [javac] location: class org.apache.hadoop.cli.CmdFactoryDFS
    [javac]         executor = CmdFactory.getCommandExecutor(cmd, tag);
    [javac]                    ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java:355: cannot find symbol
    [javac] symbol  : class TestCmd
    [javac] location: class org.apache.hadoop.cli.util.CLITestData
    [javac]           new CLITestData.TestCmd(cmd, CLITestData.TestCmd.CommandType.DFSADMIN),
    [javac]                          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java:355: cannot find symbol
    [javac] symbol  : variable TestCmd
    [javac] location: class org.apache.hadoop.cli.util.CLITestData
    [javac]           new CLITestData.TestCmd(cmd, CLITestData.TestCmd.CommandType.DFSADMIN),
    [javac]                                                   ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] 11 errors

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:412: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:446: Compile failed; see the compiler error output for details.

Total time: 44 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 638 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/638/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 713453 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-44211 / http-44212 / https-44213
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:44212
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.459 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.31 sec
   [cactus] Tomcat 5.x started on port [44212]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.324 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.33 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.867 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 52 minutes 0 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
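
Both failures above reduce to one JUnit 3 assertion on the HTTP status code returned through the proxy's authorization filter: the test expects 403 (request denied) and gets 200 (request served). As a minimal sketch of that shape — hypothetical class and member names, assuming the Cactus FilterTestCase/WebResponse API, not the actual hdfsproxy source — a Cactus end-method receives the response on the client side and asserts on it:

    // Illustrative sketch only; names are made up, not the hdfsproxy test.
    import org.apache.cactus.FilterTestCase;
    import org.apache.cactus.WebResponse;

    public class PathPermitSketch extends FilterTestCase {

        // Server-side half: Cactus runs testXXX inside the container,
        // where the filter under test handles the simulated request.
        public void testPathPermit() {
            // ... drive the authorization filter against a denied path ...
        }

        // Client-side half: Cactus then calls the matching endXXX with the
        // HTTP response. JUnit 3's assertEquals(int, int) is what formats a
        // failure as "expected:<403> but was:<200>".
        public void endPathPermit(WebResponse response) {
            assertEquals(403, response.getStatusCode());
        }
    }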




Hadoop-Hdfs-trunk - Build # 637 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/637/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 695403 lines...]
    [junit] 
    [junit] 2011-04-14 12:25:10,208 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-14 12:25:10,208 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-14 12:25:10,208 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:53070, storageID=DS-1559300299-127.0.1.1-53070-1302783909591, infoPort=59092, ipcPort=35341):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-14 12:25:10,209 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35341
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-14 12:25:10,209 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-14 12:25:10,209 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-14 12:25:10,210 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-14 12:25:10,310 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 53309
    [junit] 2011-04-14 12:25:10,311 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 53309: exiting
    [junit] 2011-04-14 12:25:10,311 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 53309
    [junit] 2011-04-14 12:25:10,311 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-14 12:25:10,311 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-14 12:25:10,311 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:55962, storageID=DS-1033477654-127.0.1.1-55962-1302783909440, infoPort=41539, ipcPort=53309):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-14 12:25:10,313 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-14 12:25:10,314 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:55962, storageID=DS-1033477654-127.0.1.1-55962-1302783909440, infoPort=41539, ipcPort=53309):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-14 12:25:10,315 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 53309
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-14 12:25:10,315 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-14 12:25:10,315 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-14 12:25:10,427 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2908)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-14 12:25:10,427 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-14 12:25:10,427 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-04-14 12:25:10,429 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 60224
    [junit] 2011-04-14 12:25:10,430 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 60224: exiting
    [junit] 2011-04-14 12:25:10,430 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 60224
    [junit] 2011-04-14 12:25:10,430 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 98.453 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 51 minutes 48 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
All tests passed
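
The DataXceiveServer warning that recurs in these console tails (java.nio.channels.AsynchronousCloseException out of ServerSocketChannel.accept) is standard NIO shutdown behavior rather than a test failure: closing a channel from one thread while another thread is blocked in accept() makes the blocked call throw that exception. A self-contained demonstration — an illustrative sketch, not Hadoop code:

    import java.net.InetSocketAddress;
    import java.nio.channels.AsynchronousCloseException;
    import java.nio.channels.ServerSocketChannel;

    public class AcceptCloseDemo {
        public static void main(String[] args) throws Exception {
            final ServerSocketChannel server = ServerSocketChannel.open();
            server.socket().bind(new InetSocketAddress(0)); // any free port

            Thread acceptor = new Thread() {
                public void run() {
                    try {
                        server.accept(); // blocks until a connection arrives
                    } catch (AsynchronousCloseException e) {
                        // The benign shutdown path seen in the logs above.
                        System.out.println("accept() ended by close: " + e);
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            };
            acceptor.start();

            Thread.sleep(200); // let the acceptor block in accept()
            server.close();    // the blocked accept() now throws
            acceptor.join();
        }
    }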

Hadoop-Hdfs-trunk - Build # 636 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/636/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 715049 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-13 12:49:23,947 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:43506, storageID=DS-1486568985-127.0.1.1-43506-1302698963166, infoPort=48490, ipcPort=54645):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-13 12:49:23,948 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 54645
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-13 12:49:23,948 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-13 12:49:23,949 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-13 12:49:23,949 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-13 12:49:23,949 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-13 12:49:23,950 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35691
    [junit] 2011-04-13 12:49:23,950 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 35691: exiting
    [junit] 2011-04-13 12:49:23,951 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 35691
    [junit] 2011-04-13 12:49:23,951 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-13 12:49:23,951 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:50046, storageID=DS-591968703-127.0.1.1-50046-1302698963017, infoPort=48358, ipcPort=35691):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-13 12:49:23,951 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.DataNode (DataNode.java:run(1497)) - DatanodeRegistration(127.0.0.1:50046, storageID=DS-591968703-127.0.1.1-50046-1302698963017, infoPort=48358, ipcPort=35691):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-13 12:49:23,952 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35691
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-13 12:49:23,952 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-13 12:49:23,953 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-13 12:49:23,953 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-13 12:49:24,054 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2908)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-13 12:49:24,054 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-13 12:49:24,054 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(573)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 3 6 
    [junit] 2011-04-13 12:49:24,056 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 60243
    [junit] 2011-04-13 12:49:24,056 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 60243: exiting
    [junit] 2011-04-13 12:49:24,056 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 60243
    [junit] 2011-04-13 12:49:24,057 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 99.675 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 76 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.fs.TestHDFSFileContextMainOperations.testCreateFlagAppendExistingFile

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.
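
The "Timeout occurred" wording is what Ant's <junit> task reports when a forked test VM is killed after exceeding the task's configured timeout, which is why the recorded duration says nothing about how long the test actually ran. For comparison, a per-test guard in JUnit 4 — an illustrative sketch; the timeout value and names are made up:

    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    public class TimeoutSketch {
        // JUnit 4 fails the individual test once it runs past the limit, so
        // the report keeps a meaningful duration; Ant's fork-level timeout
        // (the likely source of the message above) kills the whole VM instead.
        @Test(timeout = 60000) // milliseconds
        public void appendToExistingFile() throws Exception {
            // ... exercise the append code path here ...
            assertTrue(true);
        }
    }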




Hadoop-Hdfs-trunk - Build # 635 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/635/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 733846 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-25446 / http-25447 / https-25448
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:25447
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.472 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.352 sec
   [cactus] Tomcat 5.x started on port [25447]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.33 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.333 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.858 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 60 minutes 21 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
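
The test-cactus target above first echoes a set of free ports (startup/http/https) and then hands them to Cargo, so the throwaway Tomcat cannot collide with other jobs on the shared Hudson slave. The build computes the ports in Ant; an illustrative Java shape of the same idea, with hypothetical names:

    import java.io.IOException;
    import java.net.ServerSocket;

    public class FreePortSketch {
        // Bind to port 0 to let the OS assign an unused port, then release
        // it. Note this is inherently racy: another process may grab the
        // port before the container binds it.
        static int freePort() throws IOException {
            ServerSocket s = new ServerSocket(0);
            try {
                return s.getLocalPort();
            } finally {
                s.close();
            }
        }

        public static void main(String[] args) throws IOException {
            System.out.println("startup-" + freePort()
                    + " / http-" + freePort() + " / https-" + freePort());
        }
    }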




Hadoop-Hdfs-trunk - Build # 634 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/634/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 719226 lines...]
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-11 12:23:49,932 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-11 12:23:49,932 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:47347, storageID=DS-582507416-127.0.1.1-47347-1302524629514, infoPort=54226, ipcPort=44767):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-11 12:23:49,933 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 44767
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-11 12:23:49,933 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-11 12:23:49,933 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-11 12:23:49,934 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-11 12:23:50,034 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48638
    [junit] 2011-04-11 12:23:50,035 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 48638: exiting
    [junit] 2011-04-11 12:23:50,035 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-11 12:23:50,035 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-11 12:23:50,035 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 48638
    [junit] 2011-04-11 12:23:50,035 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:34265, storageID=DS-1376291308-127.0.1.1-34265-1302524629363, infoPort=47209, ipcPort=48638):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-11 12:23:50,037 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-11 12:23:50,038 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-11 12:23:50,038 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:34265, storageID=DS-1376291308-127.0.1.1-34265-1302524629363, infoPort=47209, ipcPort=48638):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-11 12:23:50,038 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48638
    [junit] 2011-04-11 12:23:50,038 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-11 12:23:50,039 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-11 12:23:50,039 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-11 12:23:50,039 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-11 12:23:50,140 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-11 12:23:50,140 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-11 12:23:50,140 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 4 
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 47278
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 47278: exiting
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 47278
    [junit] 2011-04-11 12:23:50,142 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 95.075 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 50 minutes 31 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182rgt(TestBlockReport.java:451)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
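
"Was waiting too long for a replica to become TEMPORARY" is the message of a poll-until-deadline helper giving up. A generic sketch of that pattern — hypothetical names, not the actual TestBlockReport.waitForTempReplica:

    import junit.framework.AssertionFailedError;

    public final class WaitSketch {
        interface Condition { boolean holds(); }

        // Poll the condition, sleeping between checks, and fail with the
        // given message once the deadline passes — the failure mode above.
        static void waitFor(Condition c, long timeoutMs, String message)
                throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (!c.holds()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new AssertionFailedError(message);
                }
                Thread.sleep(100); // back off between polls
            }
        }
    }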




Hadoop-Hdfs-trunk - Build # 633 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/633/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 756184 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-54162 / http-54163 / https-54164
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:54163
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.5 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.352 sec
   [cactus] Tomcat 5.x started on port [54163]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.341 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.868 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 60 minutes 33 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 632 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/632/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 721228 lines...]
    [junit] 2011-04-09 12:23:33,036 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-09 12:23:33,036 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:33367, storageID=DS-2055774178-127.0.1.1-33367-1302351812614, infoPort=48133, ipcPort=32836):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-09 12:23:33,037 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 32836
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-09 12:23:33,037 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-09 12:23:33,037 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-09 12:23:33,038 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-09 12:23:33,138 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34146
    [junit] 2011-04-09 12:23:33,139 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 34146: exiting
    [junit] 2011-04-09 12:23:33,139 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 34146
    [junit] 2011-04-09 12:23:33,139 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-09 12:23:33,139 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-09 12:23:33,139 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:45214, storageID=DS-1160250199-127.0.1.1-45214-1302351812466, infoPort=56656, ipcPort=34146):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-09 12:23:33,141 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:45214, storageID=DS-1160250199-127.0.1.1-45214-1302351812466, infoPort=56656, ipcPort=34146):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-09 12:23:33,242 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34146
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-09 12:23:33,242 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-09 12:23:33,243 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-09 12:23:33,344 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-09 12:23:33,344 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2011-04-09 12:23:33,344 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-09 12:23:33,345 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34612
    [junit] 2011-04-09 12:23:33,346 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 34612: exiting
    [junit] 2011-04-09 12:23:33,346 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 34612
    [junit] 2011-04-09 12:23:33,346 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 16, Failures: 0, Errors: 0, Time elapsed: 94.844 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 50 minutes 16 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.runTest29_30(TestFiDataTransferProtocol2.java:153)
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29(TestFiDataTransferProtocol2.java:251)
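
"Error Message: null" usually means the failing JUnit 3 assertion was written without a message string, so the AssertionFailedError carries a null message and the report prints that null verbatim. A minimal reproduction — illustrative only:

    import junit.framework.TestCase;

    public class NullMessageSketch extends TestCase {
        public void testMessagelessAssertion() {
            // assertTrue(boolean) with no message throws an
            // AssertionFailedError whose getMessage() is null, which the
            // build report then renders as "Error Message: null".
            assertTrue(false);
        }
    }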




Hadoop-Hdfs-trunk - Build # 631 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/631/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 736597 lines...]
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-08 12:22:13,792 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:13,792 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-08 12:22:13,793 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:35209, storageID=DS-1023019400-127.0.1.1-35209-1302265323192, infoPort=53571, ipcPort=35154):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-08 12:22:13,793 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 35154
    [junit] 2011-04-08 12:22:13,793 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:13,793 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-08 12:22:13,794 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-08 12:22:13,794 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-08 12:22:13,794 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-08 12:22:13,896 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52755
    [junit] 2011-04-08 12:22:13,896 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 52755: exiting
    [junit] 2011-04-08 12:22:13,896 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-08 12:22:13,896 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-08 12:22:13,897 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:60366, storageID=DS-1346925635-127.0.1.1-60366-1302265323017, infoPort=51510, ipcPort=52755):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-08 12:22:13,897 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 52755
    [junit] 2011-04-08 12:22:13,899 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:13,999 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-08 12:22:13,999 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:60366, storageID=DS-1346925635-127.0.1.1-60366-1302265323017, infoPort=51510, ipcPort=52755):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-08 12:22:14,000 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52755
    [junit] 2011-04-08 12:22:14,000 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-08 12:22:14,000 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-08 12:22:14,000 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-08 12:22:14,001 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-08 12:22:14,102 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-08 12:22:14,103 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 4 
    [junit] 2011-04-08 12:22:14,103 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-08 12:22:14,104 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55573
    [junit] 2011-04-08 12:22:14,105 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 55573: exiting
    [junit] 2011-04-08 12:22:14,105 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 55573
    [junit] 2011-04-08 12:22:14,105 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.623 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 59 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testCount

Error Message:
not supposed to get here

Stack Trace:
java.lang.RuntimeException: not supposed to get here
	at org.apache.hadoop.fs.shell.FsCommand.run(FsCommand.java:51)
	at org.apache.hadoop.fs.shell.Command.runAll(Command.java:100)
	at org.apache.hadoop.hdfs.TestDFSShell.runCount(TestDFSShell.java:737)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2xc567w1396(TestDFSShell.java:705)
	at org.apache.hadoop.hdfs.TestDFSShell.testCount(TestDFSShell.java:694)
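
The "not supposed to get here" RuntimeException is a defensive guard on a branch its author considered unreachable; the regression above shows that branch is in fact reachable from the count command. The pattern, as a sketch — hypothetical shape, not the FsCommand source:

    public class GuardSketch {
        int dispatch(String cmd) {
            if ("count".equals(cmd)) {
                return 0; // the handled case
            }
            // Fail loudly if an "impossible" branch executes, so a test run
            // surfaces the broken assumption instead of misbehaving quietly.
            throw new RuntimeException("not supposed to get here");
        }
    }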




Hadoop-Hdfs-trunk - Build # 630 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/630/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 716427 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-36789 / http-36790 / https-36791
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:36790
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.483 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.323 sec
   [cactus] Tomcat 5.x started on port [36790]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.353 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.871 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 59 minutes 36 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
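
The two TestAuthorizationFilter failures above are the same assertion: the proxy answered HTTP 200 where the test expected 403 Forbidden, i.e. the authorization filter admitted a request it should have rejected. Cactus splits such a test across the wire: the testXxx() method runs inside Tomcat, and the response is verified afterwards on the client in a matching endXxx(WebResponse) method, which is why the stack trace points at endPathPermit rather than testPathPermit. A minimal sketch of that begin/test/end pattern follows; the host, paths, and filter invocation are hypothetical illustrations, not the actual hdfsproxy test source:

    import junit.framework.Assert;
    import org.apache.cactus.FilterTestCase;
    import org.apache.cactus.WebRequest;
    import org.apache.cactus.WebResponse;

    public class AuthorizationFilterSketch extends FilterTestCase {

      // Client side: shape the HTTP request before the in-container test runs.
      public void beginPathPermit(WebRequest request) {
        // hypothetical target path; root context, servlet path, path info
        request.setURL("localhost:8080", "", "/streamFile", "/user/data/file1", null);
      }

      // Server side (inside Tomcat): drive the filter chain under test here.
      public void testPathPermit() throws Exception {
        // invoke the authorization filter via the implicit filterChain ...
      }

      // Client side again: only here is the real HTTP status visible.
      public void endPathPermit(WebResponse response) {
        // "expected:<403> but was:<200>" is this assertion failing:
        Assert.assertEquals(403, response.getStatusCode());
      }
    }

A 200 here means the filter chain ran to completion instead of short-circuiting the response with a 403.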




Hadoop-Hdfs-trunk - Build # 629 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/629/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 721564 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-21331 / http-21332 / https-21333
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:21332
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.464 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.373 sec
   [cactus] Tomcat 5.x started on port [21332]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.315 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.342 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.823 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 49 minutes 11 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
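
A side note on the console excerpts in this thread: the java.nio.channels.AsynchronousCloseException logged by DataXceiverServer during shutdown is expected noise, not a failure. MiniDFSCluster closes the datanode's listening socket while the acceptor thread is still blocked in ServerSocketChannel.accept(), and NIO delivers that close to the blocked thread as this exception. A self-contained demonstration of the channel semantics (plain Java, no Hadoop code involved):

    import java.net.InetSocketAddress;
    import java.nio.channels.AsynchronousCloseException;
    import java.nio.channels.ServerSocketChannel;

    public class AsyncCloseDemo {
      public static void main(String[] args) throws Exception {
        final ServerSocketChannel server = ServerSocketChannel.open();
        server.socket().bind(new InetSocketAddress(0)); // any free port

        Thread acceptor = new Thread(new Runnable() {
          public void run() {
            try {
              server.accept(); // blocks: no client ever connects
            } catch (AsynchronousCloseException e) {
              // Same code path the DataXceiverServer warning reports.
              System.out.println("accept() unblocked by close: " + e);
            } catch (Exception e) {
              e.printStackTrace();
            }
          }
        });
        acceptor.start();

        Thread.sleep(500);  // give the acceptor time to block in accept()
        server.close();     // close from another thread -> AsynchronousCloseException
        acceptor.join();
      }
    }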




Hadoop-Hdfs-trunk - Build # 628 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/628/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 739272 lines...]
    [junit] 	... 11 more
    [junit] 2011-04-05 12:22:42,478 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,478 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-05 12:22:42,479 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:60473, storageID=DS-2030027606-127.0.1.1-60473-1302006152029, infoPort=37469, ipcPort=48812):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-05 12:22:42,479 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48812
    [junit] 2011-04-05 12:22:42,479 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,479 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-05 12:22:42,480 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-05 12:22:42,480 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-05 12:22:42,480 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-05 12:22:42,582 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46639
    [junit] 2011-04-05 12:22:42,582 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 46639: exiting
    [junit] 2011-04-05 12:22:42,583 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 46639
    [junit] 2011-04-05 12:22:42,583 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-05 12:22:42,583 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:48393, storageID=DS-1552435921-127.0.1.1-48393-1302006151861, infoPort=37777, ipcPort=46639):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-05 12:22:42,584 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,684 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:48393, storageID=DS-1552435921-127.0.1.1-48393-1302006151861, infoPort=37777, ipcPort=46639):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-05 12:22:42,685 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 46639
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-05 12:22:42,685 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-05 12:22:42,686 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-05 12:22:42,788 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-05 12:22:42,788 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-05 12:22:42,788 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 58226
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 58226: exiting
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 58226
    [junit] 2011-04-05 12:22:42,790 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.457 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 49 minutes 32 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.runTest29_30(TestFiDataTransferProtocol2.java:153)
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29(TestFiDataTransferProtocol2.java:251)
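
The "Error Message: null" above is what the report shows when a JUnit 3 assertion fires without a message string: junit.framework.AssertionFailedError then carries a null message, so the only lead is the source line (TestFiDataTransferProtocol2.java:153). A small illustration of the difference; the condition is hypothetical, not the fault-injection test's actual check:

    import junit.framework.TestCase;

    public class MessageDemo extends TestCase {
      public void testWithoutMessage() {
        assertTrue(false);  // report shows "Error Message: null"
      }
      public void testWithMessage() {
        assertTrue("pipeline Fi_29 did not recover", false);  // report shows the text
      }
    }

An assertion message in runTest29_30 would make these pipeline failures self-describing in the digest.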




Hadoop-Hdfs-trunk - Build # 627 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/627/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 718791 lines...]
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-04-04 12:47:28,815 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-04 12:47:28,815 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-04 12:47:28,815 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:57751, storageID=DS-1453852092-127.0.1.1-57751-1301921238223, infoPort=40641, ipcPort=44415):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-04 12:47:28,816 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 44415
    [junit] 2011-04-04 12:47:28,816 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-04 12:47:28,816 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-04 12:47:28,816 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-04 12:47:28,816 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-04 12:47:28,817 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-04 12:47:28,918 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48120
    [junit] 2011-04-04 12:47:28,918 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 48120: exiting
    [junit] 2011-04-04 12:47:28,932 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 48120
    [junit] 2011-04-04 12:47:28,933 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-04 12:47:28,933 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:56077, storageID=DS-244328423-127.0.1.1-56077-1301921238026, infoPort=60079, ipcPort=48120):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-04 12:47:28,933 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-04 12:47:29,033 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-04 12:47:29,034 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:56077, storageID=DS-244328423-127.0.1.1-56077-1301921238026, infoPort=60079, ipcPort=48120):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-04 12:47:29,034 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48120
    [junit] 2011-04-04 12:47:29,034 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-04 12:47:29,034 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-04 12:47:29,035 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-04 12:47:29,035 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-04 12:47:29,137 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-04 12:47:29,137 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-04 12:47:29,137 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 11 2 
    [junit] 2011-04-04 12:47:29,139 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 45063
    [junit] 2011-04-04 12:47:29,139 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 45063: exiting
    [junit] 2011-04-04 12:47:29,140 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 45063
    [junit] 2011-04-04 12:47:29,140 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.543 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 73 minutes 42 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestLargeBlock.testLargeBlockSize

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jrg8(TestBlockReport.java:414)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)
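
Both failures above are timing problems of different severity. TestLargeBlock hit the test runner's hard timeout, so the elapsed time in the report is meaningless (as the message itself warns). blockReport_08 is softer: pending replication is driven by namenode background threads, so a test that sleeps a fixed interval and then reads the counter can sample it before the second block is queued and see 1 instead of 2. The usual hardening is to poll with a deadline instead of a single sleep; a generic sketch of that pattern (the Check interface and waitFor helper are illustrative, not Hadoop's test utilities):

    public class WaitUtil {
      /** Hypothetical predicate callback; JUnit-3-era Java, so no java.util.function. */
      public interface Check {
        boolean ok() throws Exception;
      }

      /** Poll until the check passes or the deadline expires. */
      public static void waitFor(Check check, long intervalMs, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.ok()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(intervalMs);
        }
      }
    }

A block-report test would then wait for the pending-replication count to reach 2, up to a generous timeout, before asserting on it.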




Hadoop-Hdfs-trunk - Build # 626 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/626/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 723583 lines...]
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-04-03 12:23:29,427 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-03 12:23:29,427 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-03 12:23:29,427 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:51460, storageID=DS-1312723792-127.0.1.1-51460-1301833398906, infoPort=42430, ipcPort=50455):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-04-03 12:23:29,427 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 50455
    [junit] 2011-04-03 12:23:29,428 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-03 12:23:29,428 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-03 12:23:29,428 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-03 12:23:29,428 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-03 12:23:29,429 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-04-03 12:23:29,530 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55674
    [junit] 2011-04-03 12:23:29,530 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 55674: exiting
    [junit] 2011-04-03 12:23:29,531 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 55674
    [junit] 2011-04-03 12:23:29,531 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-04-03 12:23:29,531 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:52044, storageID=DS-2081569357-127.0.1.1-52044-1301833398732, infoPort=47230, ipcPort=55674):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-04-03 12:23:29,531 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-04-03 12:23:29,632 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-04-03 12:23:29,632 INFO  datanode.DataNode (DataNode.java:run(1496)) - DatanodeRegistration(127.0.0.1:52044, storageID=DS-2081569357-127.0.1.1-52044-1301833398732, infoPort=47230, ipcPort=55674):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-04-03 12:23:29,632 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 55674
    [junit] 2011-04-03 12:23:29,633 INFO  datanode.DataNode (DataNode.java:shutdown(791)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-04-03 12:23:29,633 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-04-03 12:23:29,633 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-04-03 12:23:29,634 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-04-03 12:23:29,645 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-03 12:23:29,645 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-04-03 12:23:29,645 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 3 
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 37535
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 37535: exiting
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 37535
    [junit] 2011-04-03 12:23:29,647 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.37 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 49 minutes 53 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182rgt(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
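
One reading aid for these traces: the __CLR3_0_2... frames come from Clover, which instruments the test sources for the coverage report mentioned at the end of every build. As the two frames above show, the instrumented blockReport_09 (TestBlockReport.java:429) delegates to a generated method holding the real body (line 457), so the deepest frame is the one that locates the failing assertion. Roughly, and only as an illustration of the shape of the rewrite:

    /** Sketch of Clover-style instrumentation; names mirror the trace above. */
    public class InstrumentedShape {
      public void blockReport_09() throws Exception {
        __CLR3_0_2fte182rgt();  // generated delegate carrying the original body
      }
      private void __CLR3_0_2fte182rgt() throws Exception {
        // original test body, with coverage-recording calls woven in;
        // the failing assertEquals on the PendingReplication count lives here.
      }
    }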




Hadoop-Hdfs-trunk - Build # 625 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/625/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 713798 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-34955 / http-34956 / https-34957
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:34956
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.455 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.323 sec
   [cactus] Tomcat 5.x started on port [34956]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.336 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.316 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.861 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 49 minutes 34 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 624 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/624/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 738293 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-25460 / http-25461 / https-25462
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:25461
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.489 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.492 sec
   [cactus] Tomcat 5.x started on port [25461]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.329 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.34 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.882 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 50 minutes 15 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 623 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/623/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 710777 lines...]
    [junit] 2011-03-31 12:33:18,494 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_8517475587862166522_1001 0 : Thread is interrupted.
    [junit] 2011-03-31 12:33:18,494 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2011-03-31 12:33:18,494 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-31 12:33:18,495 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_8517475587862166522_1001 terminating
    [junit] 2011-03-31 12:33:18,495 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51778, storageID=DS-1795582721-127.0.1.1-51778-1301574787630, infoPort=48804, ipcPort=48740)
    [junit] 2011-03-31 12:33:18,496 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:51778, storageID=DS-1795582721-127.0.1.1-51778-1301574787630, infoPort=48804, ipcPort=48740):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:360)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-31 12:33:18,497 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-31 12:33:18,598 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-31 12:33:18,598 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:51778, storageID=DS-1795582721-127.0.1.1-51778-1301574787630, infoPort=48804, ipcPort=48740):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-31 12:33:18,598 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 48740
    [junit] 2011-03-31 12:33:18,598 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-31 12:33:18,599 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-31 12:33:18,599 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-31 12:33:18,599 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-31 12:33:18,701 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-31 12:33:18,701 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2011-03-31 12:33:18,701 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2857)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 59355
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 59355: exiting
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 59355
    [junit] 2011-03-31 12:33:18,703 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.512 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 47 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 112047, r=ReplicaInPipeline, blk_5010047870379614353_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_5010047870379614353   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 112047, r=ReplicaInPipeline, blk_5010047870379614353_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_5010047870379614353
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1375)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgw(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)
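
The exception above spells out the invariant that tripped: converting a TEMPORARY replica to RBW (replica being written) is refused when the replica has received fewer bytes (numBytes = 65536) than the caller claims are visible (112047), i.e. the transfer had not caught up when the test called convertTemporaryToRbw. This is the same family of timing race as the block-report failures above. The message reads like a guard of roughly this shape; what follows is an illustrative reconstruction from the message format, with assumed names, not the actual FSDataset source:

    import java.io.IOException;

    public class VisibleLengthGuard {
      /** Hypothetical guard mirroring the exception text above. */
      static void checkVisibleLength(long numBytes, long visible, Object replica)
          throws IOException {
        if (numBytes < visible) {
          throw new IOException(
              numBytes + " = numBytes < visible = " + visible + ", r=" + replica);
        }
      }
    }

In the trace the actual type thrown is ReplicaNotFoundException, carrying the full replica state dump shown in the error message.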




Hadoop-Hdfs-trunk - Build # 622 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/622/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 698802 lines...]
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_-4878957023449505870_1001 0 : Thread is interrupted.
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432)
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 2
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_-4878957023449505870_1001 terminating
    [junit] 2011-03-30 12:22:26,214 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432)
    [junit] 2011-03-30 12:22:26,215 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:360)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-30 12:22:26,217 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-30 12:22:26,317 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-30 12:22:26,317 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:51648, storageID=DS-1891937375-127.0.1.1-51648-1301487735313, infoPort=55741, ipcPort=57432):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-30 12:22:26,317 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 57432
    [junit] 2011-03-30 12:22:26,318 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-30 12:22:26,318 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-30 12:22:26,318 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-30 12:22:26,319 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-30 12:22:26,420 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-30 12:22:26,420 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-30 12:22:26,420 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2 
    [junit] 2011-03-30 12:22:26,422 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41503
    [junit] 2011-03-30 12:22:26,423 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 41503: exiting
    [junit] 2011-03-30 12:22:26,423 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 41503
    [junit] 2011-03-30 12:22:26,423 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.721 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 57 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 130213, r=ReplicaInPipeline, blk_-2736525394384087704_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-2736525394384087704   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 130213, r=ReplicaInPipeline, blk_-2736525394384087704_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-2736525394384087704
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1375)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgv(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)
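
The exception text encodes the failing invariant: a TEMPORARY replica may only be converted to RBW (replica being written) once it has received at least as many bytes as are visible to readers, and here the transfer had stalled at 65536 bytes while the visible length had advanced to 130213. A hedged sketch of that guard, with names taken from the message rather than from the FSDataset source:

    import java.io.IOException;

    // Illustration only: the real code throws ReplicaNotFoundException from
    // FSDataset.convertTemporaryToRbw; a plain IOException stands in here.
    final class RbwConversionSketch {
        static void checkBytesReceived(long numBytes, long visibleLength)
                throws IOException {
            // The transfer must have caught up with what readers can already
            // see before the temporary replica becomes a proper RBW replica.
            if (numBytes < visibleLength) {
                throw new IOException(numBytes + " = numBytes < visible = "
                        + visibleLength);
            }
        }
    }

That the visible length differs on every run (130213 here, other values in the builds below) while numBytes is pinned at 65536 suggests a timing race between the transfer and the writer rather than a deterministic bug.
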


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n131j(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)
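
This failure and testFilePermissions below die in the same helper: confirmOwner asserts the group of a path after a chgrp-style change, expecting the test group "reptiles" but finding "supergroup", the stock default of dfs.permissions.supergroup, as if the group change never took effect. A minimal sketch of such an assertion, assuming hypothetical fs and p handles (JUnit 3 style, matching the ComparisonFailure above):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import static junit.framework.Assert.assertEquals;

    // Sketch of a confirmOwner-style group check; not the TestDFSShell source.
    final class OwnerCheckSketch {
        static void confirmGroup(FileSystem fs, Path p, String expectedGroup)
                throws IOException {
            FileStatus status = fs.getFileStatus(p);
            // Fails as above when the group is still the default "supergroup".
            assertEquals(expectedGroup, status.getGroup());
        }
    }
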


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot1396(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk13a5(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)
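
This and the three identical failures below (testRemoteException, testGet, testLsr) all abort in MiniDFSCluster startup because the name directory is still locked, i.e. an earlier cluster in the same workspace was never shut down and its JVM still holds the lock. HDFS guards each storage directory with an exclusive lock on an in_use.lock file inside it; a sketch of that idiom, assuming only the lock-file name, which is the one HDFS uses:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    // Sketch of the storage-directory lock behind the IOException above.
    final class StorageLockSketch {
        static FileLock lockStorage(File storageDir) throws IOException {
            File lockFile = new File(storageDir, "in_use.lock");
            RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
            FileLock lock = raf.getChannel().tryLock();
            if (lock == null) {              // another JVM still holds it
                raf.close();
                throw new IOException("Cannot lock storage " + storageDir
                        + ". The directory is already locked.");
            }
            return lock;                     // held until released or JVM exit
        }
    }

Releasing the leaked cluster (or giving each test its own base directory) frees the lock; the message and the Storage$StorageDirectory.lock frame in the trace match this mechanism.
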


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613dt(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13es(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813g1(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 621 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/621/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 699203 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-29 12:32:37,695 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,696 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-29 12:32:37,696 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:53973, storageID=DS-777661655-127.0.1.1-53973-1301401947074, infoPort=47607, ipcPort=60453):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-29 12:32:37,696 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 60453
    [junit] 2011-03-29 12:32:37,696 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,698 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-29 12:32:37,698 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-29 12:32:37,698 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-29 12:32:37,699 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-29 12:32:37,800 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41601
    [junit] 2011-03-29 12:32:37,800 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 41601: exiting
    [junit] 2011-03-29 12:32:37,801 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 41601
    [junit] 2011-03-29 12:32:37,801 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-29 12:32:37,801 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:47815, storageID=DS-976453409-127.0.1.1-47815-1301401946890, infoPort=49694, ipcPort=41601):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-29 12:32:37,801 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,902 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-29 12:32:37,902 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:47815, storageID=DS-976453409-127.0.1.1-47815-1301401946890, infoPort=49694, ipcPort=41601):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-29 12:32:37,902 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41601
    [junit] 2011-03-29 12:32:37,903 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-29 12:32:37,903 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-29 12:32:37,903 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-29 12:32:37,903 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-29 12:32:38,005 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-29 12:32:38,005 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 1 
    [junit] 2011-03-29 12:32:38,005 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-29 12:32:38,007 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 37641
    [junit] 2011-03-29 12:32:38,007 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 37641: exiting
    [junit] 2011-03-29 12:32:38,008 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 37641
    [junit] 2011-03-29 12:32:38,008 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.599 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 8 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 102717, r=ReplicaInPipeline, blk_-3557091731890250719_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-3557091731890250719   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 102717, r=ReplicaInPipeline, blk_-3557091731890250719_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-3557091731890250719
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1376)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgv(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n131j(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot1396(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk13a5(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613dt(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13es(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813g1(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 620 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/620/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 690185 lines...]
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-28 12:32:38,702 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-28 12:32:38,702 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:59927, storageID=DS-1180270094-127.0.1.1-59927-1301315548093, infoPort=44831, ipcPort=43258):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-28 12:32:38,703 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 43258
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-28 12:32:38,703 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-28 12:32:38,704 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-28 12:32:38,704 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-28 12:32:38,805 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 47290
    [junit] 2011-03-28 12:32:38,806 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 47290: exiting
    [junit] 2011-03-28 12:32:38,806 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 47290
    [junit] 2011-03-28 12:32:38,806 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-28 12:32:38,806 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:53628, storageID=DS-1160082990-127.0.1.1-53628-1301315547913, infoPort=32856, ipcPort=47290):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-28 12:32:38,806 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-28 12:32:38,907 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:53628, storageID=DS-1160082990-127.0.1.1-53628-1301315547913, infoPort=32856, ipcPort=47290):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-28 12:32:38,908 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 47290
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-28 12:32:38,908 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-28 12:32:38,909 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-28 12:32:39,012 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-28 12:32:39,012 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2011-03-28 12:32:39,012 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-28 12:32:39,014 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 43074
    [junit] 2011-03-28 12:32:39,015 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 43074: exiting
    [junit] 2011-03-28 12:32:39,015 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 43074
    [junit] 2011-03-28 12:32:39,015 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.6 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 5 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 79419, r=ReplicaInPipeline, blk_-6718005118883221936_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6718005118883221936   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 79419, r=ReplicaInPipeline, blk_-6718005118883221936_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6718005118883221936
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgc(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n1310(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138n(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139m(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613da(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e9(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fi(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 619 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/619/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 713643 lines...]
    [junit] 
    [junit] 2011-03-27 12:31:39,099 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226)
    [junit] 2011-03-27 12:31:39,099 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226)
    [junit] 2011-03-27 12:31:39,098 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_-573293244434035474_1001 0 : Thread is interrupted.
    [junit] 2011-03-27 12:31:39,099 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_-573293244434035474_1001 terminating
    [junit] 2011-03-27 12:31:39,099 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:393)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-27 12:31:39,101 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-27 12:31:39,201 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-27 12:31:39,201 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:43089, storageID=DS-402378168-127.0.1.1-43089-1301229088198, infoPort=54919, ipcPort=41226):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-27 12:31:39,202 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 41226
    [junit] 2011-03-27 12:31:39,202 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-27 12:31:39,202 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-27 12:31:39,202 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-27 12:31:39,203 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-27 12:31:39,304 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-27 12:31:39,304 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 10 3 
    [junit] 2011-03-27 12:31:39,305 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-27 12:31:39,306 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 42780
    [junit] 2011-03-27 12:31:39,307 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 42780: exiting
    [junit] 2011-03-27 12:31:39,307 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 42780
    [junit] 2011-03-27 12:31:39,308 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.42 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 58 minutes 12 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 84255, r=ReplicaInPipeline, blk_-8473050276165535237_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 65536   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-8473050276165535237   bytesAcked=0   bytesOnDisk=65536

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 84255, r=ReplicaInPipeline, blk_-8473050276165535237_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 65536
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-8473050276165535237
  bytesAcked=0
  bytesOnDisk=65536
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgc(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n1310(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138n(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139m(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613da(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e9(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fi(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 618 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/618/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 711591 lines...]
    [junit] 2011-03-26 12:22:04,765 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 2
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716)
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.DataNode (BlockReceiver.java:run(926)) - PacketResponder blk_-8470746226147512846_1001 0 : Thread is interrupted.
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716)
    [junit] 2011-03-26 12:22:04,766 INFO  datanode.DataNode (BlockReceiver.java:run(1010)) - PacketResponder 0 for block blk_-8470746226147512846_1001 terminating
    [junit] 2011-03-26 12:22:04,767 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:463)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:651)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:393)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-26 12:22:04,768 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-26 12:22:04,868 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-26 12:22:04,868 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:54302, storageID=DS-279690435-127.0.1.1-54302-1301142113914, infoPort=34030, ipcPort=42716):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-26 12:22:04,869 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 42716
    [junit] 2011-03-26 12:22:04,869 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-26 12:22:04,869 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-26 12:22:04,869 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-26 12:22:04,870 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-26 12:22:04,882 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-26 12:22:04,882 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-26 12:22:04,882 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 3 3 
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 51057
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 51057: exiting
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 51057
    [junit] 2011-03-26 12:22:04,884 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.623 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 37 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 125757, r=ReplicaInPipeline, blk_-6894539633202504254_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 65536   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6894539633202504254   bytesAcked=0   bytesOnDisk=65536

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 125757, r=ReplicaInPipeline, blk_-6894539633202504254_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 65536
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-6894539633202504254
  bytesAcked=0
  bytesOnDisk=65536
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tgc(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)
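
The message above spells out the guard enforced when a TEMPORARY replica is converted to RBW (replica being written): the bytes already received must cover the length visible to the client, and here only 65536 bytes (one packet) had landed against a visible length of 125757, with bytesAcked=0, suggesting the conversion was attempted before the transfer had caught up. A self-contained sketch of that invariant (the names are ours, not the FSDataset internals):

    // Illustration of the invariant behind "numBytes < visible":
    // conversion to RBW requires at least visibleLength bytes written.
    public class ReplicaConversionCheck {
      static void checkConvertible(long numBytes, long visibleLength)
          throws java.io.IOException {
        if (numBytes < visibleLength) {
          throw new java.io.IOException(
              numBytes + " = numBytes < visible = " + visibleLength);
        }
      }

      public static void main(String[] args) {
        try {
          checkConvertible(65536L, 65536L);   // enough bytes: passes
          checkConvertible(65536L, 125757L);  // the case reported: rejected
        } catch (java.io.IOException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }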


FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n1310(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138n(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)
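
Both comparison failures above come from confirmOwner: the tests evidently expect group "reptiles" on a path but read back the default "supergroup", i.e. the group change never took effect. A hedged sketch of the round trip being asserted (class name and path are illustrative; FileSystem.setOwner with a null username changes only the group):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Illustrative round trip: set the group on a file and read it back
    // through FileStatus, which is what the failing assertion compares
    // against "reptiles".
    public class GroupRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path p = new Path("/groupRoundTrip");
          fs.create(p).close();
          fs.setOwner(p, null, "reptiles");  // null user: change group only
          System.out.println("group = " + fs.getFileStatus(p).getGroup());
        } finally {
          cluster.shutdown();
        }
      }
    }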


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139m(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613da(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e9(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fi(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 617 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/617/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 706775 lines...]
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-25 12:23:16,150 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:36251, storageID=DS-468696142-127.0.1.1-36251-1301055785538, infoPort=55782, ipcPort=36232):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-25 12:23:16,151 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 36232
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,151 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-25 12:23:16,152 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-25 12:23:16,152 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-25 12:23:16,152 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52971
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 52971: exiting
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 52971
    [junit] 2011-03-25 12:23:16,254 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-25 12:23:16,255 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-25 12:23:16,255 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:41445, storageID=DS-1304287072-127.0.1.1-41445-1301055785353, infoPort=53886, ipcPort=52971):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-25 12:23:16,257 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,357 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-25 12:23:16,358 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:41445, storageID=DS-1304287072-127.0.1.1-41445-1301055785353, infoPort=53886, ipcPort=52971):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-25 12:23:16,358 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 52971
    [junit] 2011-03-25 12:23:16,358 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-25 12:23:16,358 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-25 12:23:16,359 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-25 12:23:16,359 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-25 12:23:16,460 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-25 12:23:16,460 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-25 12:23:16,461 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 36444
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 36444: exiting
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 36444
    [junit] 2011-03-25 12:23:16,463 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.347 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 49 minutes 52 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
FAILED:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n130s(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138f(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139e(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613d2(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e1(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


FAILED:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fa(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 616 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/616/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 708649 lines...]
    [junit] 2011-03-24 12:22:35,345 INFO  datanode.DataNode (BlockReceiver.java:run(914)) - PacketResponder blk_6538719823285349735_1001 0 : Thread is interrupted.
    [junit] 2011-03-24 12:22:35,344 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 3
    [junit] 2011-03-24 12:22:35,344 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 34307
    [junit] 2011-03-24 12:22:35,345 INFO  datanode.DataNode (BlockReceiver.java:run(999)) - PacketResponder 0 for block blk_6538719823285349735_1001 terminating
    [junit] 2011-03-24 12:22:35,345 INFO  datanode.BlockReceiverAspects (BlockReceiverAspects.aj:ajc$after$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$9$725950a6(220)) - FI: blockFileClose, datanode=DatanodeRegistration(127.0.0.1:42931, storageID=DS-1663091717-127.0.1.1-42931-1300969344471, infoPort=45787, ipcPort=34307)
    [junit] 2011-03-24 12:22:35,346 ERROR datanode.DataNode (DataXceiver.java:run(132)) - DatanodeRegistration(127.0.0.1:42931, storageID=DS-1663091717-127.0.1.1-42931-1300969344471, infoPort=45787, ipcPort=34307):DataXceiver
    [junit] java.lang.RuntimeException: java.lang.InterruptedException: sleep interrupted
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:82)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:346)
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:451)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:639)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:332)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-24 12:22:35,347 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-24 12:22:35,448 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-24 12:22:35,448 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:42931, storageID=DS-1663091717-127.0.1.1-42931-1300969344471, infoPort=45787, ipcPort=34307):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-24 12:22:35,448 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34307
    [junit] 2011-03-24 12:22:35,448 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-24 12:22:35,449 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-24 12:22:35,449 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-24 12:22:35,449 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-24 12:22:35,551 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-24 12:22:35,551 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2011-03-24 12:22:35,551 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-24 12:22:35,553 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 44144
    [junit] 2011-03-24 12:22:35,553 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 44144: exiting
    [junit] 2011-03-24 12:22:35,554 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 44144
    [junit] 2011-03-24 12:22:35,554 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.558 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 48 minutes 29 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
REGRESSION:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
One of the tests failed. See the Detailed results to identify the command that failed

Stack Trace:
junit.framework.AssertionFailedError: One of the tests failed. See the Detailed results to identify the command that failed
	at org.apache.hadoop.cli.CLITestHelper.displayResults(CLITestHelper.java:257)
	at org.apache.hadoop.cli.CLITestHelper.tearDown(CLITestHelper.java:119)
	at org.apache.hadoop.cli.TestHDFSCLI.tearDown(TestHDFSCLI.java:81)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testURIPaths

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ltte1n130s(TestDFSShell.java:516)
	at org.apache.hadoop.hdfs.TestDFSShell.testURIPaths(TestDFSShell.java:449)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions

Error Message:
null expected:<[reptiles]> but was:<[supergroup]>

Stack Trace:
junit.framework.ComparisonFailure: null expected:<[reptiles]> but was:<[supergroup]>
	at org.apache.hadoop.hdfs.TestDFSShell.confirmOwner(TestDFSShell.java:846)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22e88ot138f(TestDFSShell.java:889)
	at org.apache.hadoop.hdfs.TestDFSShell.testFilePermissions(TestDFSShell.java:851)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testDFSShell

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2prqrtk139e(TestDFSShell.java:920)
	at org.apache.hadoop.hdfs.TestDFSShell.testDFSShell(TestDFSShell.java:916)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testRemoteException

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2ayein613d2(TestDFSShell.java:1143)
	at org.apache.hadoop.hdfs.TestDFSShell.testRemoteException(TestDFSShell.java:1136)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testGet

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_2tpje3v13e1(TestDFSShell.java:1182)
	at org.apache.hadoop.hdfs.TestDFSShell.testGet(TestDFSShell.java:1179)


REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testLsr

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1420)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:210)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDFSShell.__CLR3_0_22emby813fa(TestDFSShell.java:1240)
	at org.apache.hadoop.hdfs.TestDFSShell.testLsr(TestDFSShell.java:1238)




Hadoop-Hdfs-trunk - Build # 615 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/615/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 6 lines...]
java.lang.NullPointerException
	at hudson.tasks.JavadocArchiver.perform(JavadocArchiver.java:94)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:19)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
Archiving artifacts
Recording test results
ERROR: Publisher hudson.tasks.junit.JUnitResultArchiver aborted due to exception
java.lang.NullPointerException
	at hudson.tasks.junit.JUnitParser.parse(JUnitParser.java:83)
	at hudson.tasks.junit.JUnitResultArchiver.parse(JUnitResultArchiver.java:123)
	at hudson.tasks.junit.JUnitResultArchiver.perform(JUnitResultArchiver.java:135)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:19)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
Recording fingerprints
ERROR: Unable to record fingerprints because there's no workspace
ERROR: Publisher hudson.plugins.violations.ViolationsPublisher aborted due to exception
java.lang.NullPointerException
	at hudson.plugins.violations.ViolationsPublisher.perform(ViolationsPublisher.java:74)
	at hudson.tasks.BuildStepMonitor$3.perform(BuildStepMonitor.java:36)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
ERROR: Publisher hudson.plugins.clover.CloverPublisher aborted due to exception
java.lang.NullPointerException
	at hudson.plugins.clover.CloverPublisher.perform(CloverPublisher.java:137)
	at hudson.tasks.BuildStepMonitor$3.perform(BuildStepMonitor.java:36)
	at hudson.model.AbstractBuild$AbstractRunner.perform(AbstractBuild.java:644)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:623)
	at hudson.model.AbstractBuild$AbstractRunner.performAllBuildSteps(AbstractBuild.java:601)
	at hudson.model.Build$RunnerImpl.post2(Build.java:159)
	at hudson.model.AbstractBuild$AbstractRunner.post(AbstractBuild.java:570)
	at hudson.model.Run.run(Run.java:1386)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:145)
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.



Hadoop-Hdfs-trunk - Build # 614 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/614/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 729780 lines...]
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-22 12:24:42,983 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-22 12:24:42,983 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-22 12:24:42,984 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:57260, storageID=DS-91605065-127.0.1.1-57260-1300796672283, infoPort=35711, ipcPort=34865):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-22 12:24:42,984 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34865
    [junit] 2011-03-22 12:24:42,984 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-22 12:24:42,984 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-22 12:24:42,985 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-22 12:24:42,985 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-22 12:24:42,985 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-22 12:24:42,990 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 38522
    [junit] 2011-03-22 12:24:42,990 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-22 12:24:42,990 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-22 12:24:42,991 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 38522
    [junit] 2011-03-22 12:24:42,991 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:49868, storageID=DS-1067466969-127.0.1.1-49868-1300796672121, infoPort=48806, ipcPort=38522):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-22 12:24:42,993 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-22 12:24:42,995 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 38522: exiting
    [junit] 2011-03-22 12:24:43,093 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-22 12:24:43,093 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:49868, storageID=DS-1067466969-127.0.1.1-49868-1300796672121, infoPort=48806, ipcPort=38522):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-22 12:24:43,094 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 38522
    [junit] 2011-03-22 12:24:43,094 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-22 12:24:43,094 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-22 12:24:43,094 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-22 12:24:43,095 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-22 12:24:43,196 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-22 12:24:43,196 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 4 
    [junit] 2011-03-22 12:24:43,197 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-22 12:24:43,198 INFO  ipc.Server (Server.java:stop(1626)) - Stopping server on 34268
    [junit] 2011-03-22 12:24:43,198 INFO  ipc.Server (Server.java:run(1459)) - IPC Server handler 0 on 34268: exiting
    [junit] 2011-03-22 12:24:43,199 INFO  ipc.Server (Server.java:run(691)) - Stopping IPC Server Responder
    [junit] 2011-03-22 12:24:43,199 INFO  ipc.Server (Server.java:run(487)) - Stopping IPC Server listener on 34268
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.379 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 51 minutes 22 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 95378, r=ReplicaInPipeline, blk_1818901318025178337_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_1818901318025178337   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 95378, r=ReplicaInPipeline, blk_1818901318025178337_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_1818901318025178337
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tg4(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)




Hadoop-Hdfs-trunk - Build # 613 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/613/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 706069 lines...]
    [junit] 
    [junit] 2011-03-21 12:22:28,346 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 38735
    [junit] 2011-03-21 12:22:28,348 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-21 12:22:28,348 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-21 12:22:28,348 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:40498, storageID=DS-388299994-127.0.1.1-40498-1300710137771, infoPort=47549, ipcPort=38735):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-21 12:22:28,349 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 38735
    [junit] 2011-03-21 12:22:28,349 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-21 12:22:28,349 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-21 12:22:28,349 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-21 12:22:28,350 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-21 12:22:28,350 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-21 12:22:28,451 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 51456
    [junit] 2011-03-21 12:22:28,452 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 51456: exiting
    [junit] 2011-03-21 12:22:28,452 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 51456
    [junit] 2011-03-21 12:22:28,452 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-21 12:22:28,452 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:43128, storageID=DS-2103166115-127.0.1.1-43128-1300710137598, infoPort=42993, ipcPort=51456):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-21 12:22:28,452 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-21 12:22:28,454 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-21 12:22:28,555 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-21 12:22:28,555 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:43128, storageID=DS-2103166115-127.0.1.1-43128-1300710137598, infoPort=42993, ipcPort=51456):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-21 12:22:28,555 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 51456
    [junit] 2011-03-21 12:22:28,556 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-21 12:22:28,556 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-21 12:22:28,556 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-21 12:22:28,556 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-21 12:22:28,658 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-21 12:22:28,658 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2011-03-21 12:22:28,658 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-21 12:22:28,660 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 55720
    [junit] 2011-03-21 12:22:28,660 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 55720: exiting
    [junit] 2011-03-21 12:22:28,661 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 55720
    [junit] 2011-03-21 12:22:28,661 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.639 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 49 minutes 27 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 89896, r=ReplicaInPipeline, blk_-8157901986060962899_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-8157901986060962899   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 89896, r=ReplicaInPipeline, blk_-8157901986060962899_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-8157901986060962899
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1387)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2021)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tfi(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)
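
The ReplicaNotFoundException above is the guard in FSDataset.convertTemporaryToRbw() firing: the TEMPORARY replica has only received 65536 bytes (one 64KB write packet) while the client already reports 89896 bytes visible, so the conversion to RBW is refused. Build 610 below fails the same check with the same numBytes but a different visible length (66996), which points at a timing race in the test rather than corrupt data. A minimal Java sketch of this kind of length guard, with illustrative names rather than the exact FSDataset internals:

    import java.io.IOException;

    public class ConvertTemporaryToRbwCheck {
      // Hedged sketch: the real code throws ReplicaNotFoundException (an
      // IOException subclass) using the message format quoted above.
      static void checkVisibleLength(long numBytes, long visible,
          String replica) throws IOException {
        if (numBytes < visible) {
          throw new IOException(numBytes + " = numBytes < visible = "
              + visible + ", r=" + replica);
        }
      }

      public static void main(String[] args) {
        try {
          checkVisibleLength(65536, 89896, "ReplicaInPipeline, TEMPORARY");
        } catch (IOException e) {
          System.out.println(e.getMessage());  // matches the message above
        }
      }
    }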




Hadoop-Hdfs-trunk - Build # 612 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/612/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 695456 lines...]
    [junit] 2011-03-20 12:22:42,323 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-20 12:22:42,324 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-20 12:22:42,324 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:49595, storageID=DS-765468382-127.0.1.1-49595-1300623751741, infoPort=37061, ipcPort=59644):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-20 12:22:42,324 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 59644
    [junit] 2011-03-20 12:22:42,324 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-20 12:22:42,324 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-20 12:22:42,325 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-20 12:22:42,325 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-20 12:22:42,325 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-20 12:22:42,427 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 56909
    [junit] 2011-03-20 12:22:42,427 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 56909: exiting
    [junit] 2011-03-20 12:22:42,427 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 56909
    [junit] 2011-03-20 12:22:42,427 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-20 12:22:42,427 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-20 12:22:42,427 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:42236, storageID=DS-841031619-127.0.1.1-42236-1300623751567, infoPort=38387, ipcPort=56909):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-20 12:22:42,430 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-20 12:22:42,530 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-20 12:22:42,530 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:42236, storageID=DS-841031619-127.0.1.1-42236-1300623751567, infoPort=38387, ipcPort=56909):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-20 12:22:42,531 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 56909
    [junit] 2011-03-20 12:22:42,531 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-20 12:22:42,531 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-20 12:22:42,531 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-20 12:22:42,532 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-20 12:22:42,633 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-20 12:22:42,633 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-20 12:22:42,634 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2011-03-20 12:22:42,635 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 40189
    [junit] 2011-03-20 12:22:42,635 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 40189: exiting
    [junit] 2011-03-20 12:22:42,636 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 40189
    [junit] 2011-03-20 12:22:42,636 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.667 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 49 minutes 43 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.runTest29_30(TestFiDataTransferProtocol2.java:153)
	at org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29(TestFiDataTransferProtocol2.java:251)
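
The "null" error message just reflects a JUnit 3 assertion raised without a message string: AssertionFailedError then carries a null message, which the report prints verbatim. A self-contained illustration (not the TestFiDataTransferProtocol2 code itself):

    import junit.framework.Assert;
    import junit.framework.AssertionFailedError;

    public class NullMessageDemo {
      public static void main(String[] args) {
        try {
          Assert.assertTrue(false);  // no message argument supplied
        } catch (AssertionFailedError e) {
          // Prints "message = null"; a test report renders this as
          // "Error Message: null".
          System.out.println("message = " + e.getMessage());
        }
      }
    }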




Re: Hadoop-Hdfs-trunk - Build # 611 - Still Failing

Posted by Eli Collins <el...@cloudera.com>.
The TestFiRename test failures are HDFS-1770, which I've committed the fix for.


On Sat, Mar 19, 2011 at 5:31 AM, Apache Hudson Server
<hu...@hudson.apache.org> wrote:
> See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/611/

Hadoop-Hdfs-trunk - Build # 611 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/611/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 751238 lines...]
    [junit] 2011-03-19 12:31:13,764 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-19 12:31:13,765 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-19 12:31:13,765 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:54442, storageID=DS-347111320-127.0.1.1-54442-1300537863165, infoPort=59494, ipcPort=38135):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-03-19 12:31:13,765 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 38135
    [junit] 2011-03-19 12:31:13,765 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-19 12:31:13,766 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-19 12:31:13,766 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-19 12:31:13,766 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-19 12:31:13,767 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-19 12:31:13,868 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 48381
    [junit] 2011-03-19 12:31:13,868 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 48381: exiting
    [junit] 2011-03-19 12:31:13,869 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 48381
    [junit] 2011-03-19 12:31:13,869 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-19 12:31:13,869 WARN  datanode.DataNode (DataXceiverServer.java:run(142)) - DatanodeRegistration(127.0.0.1:33450, storageID=DS-19390335-127.0.1.1-33450-1300537863003, infoPort=43863, ipcPort=48381):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:135)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-19 12:31:13,869 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-19 12:31:13,871 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-19 12:31:13,972 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(624)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-19 12:31:13,972 INFO  datanode.DataNode (DataNode.java:run(1464)) - DatanodeRegistration(127.0.0.1:33450, storageID=DS-19390335-127.0.1.1-33450-1300537863003, infoPort=43863, ipcPort=48381):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-19 12:31:13,972 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 48381
    [junit] 2011-03-19 12:31:13,972 INFO  datanode.DataNode (DataNode.java:shutdown(788)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-19 12:31:13,972 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-19 12:31:13,973 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-19 12:31:13,973 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-19 12:31:14,074 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2856)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-19 12:31:14,075 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 2 
    [junit] 2011-03-19 12:31:14,075 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-19 12:31:14,076 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 53197
    [junit] 2011-03-19 12:31:14,077 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 53197: exiting
    [junit] 2011-03-19 12:31:14,077 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.524 sec
    [junit] 2011-03-19 12:31:14,095 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 53197

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 58 minutes 8 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.fs.TestFiRename.testFailureNonExistentDst

Error Message:
Internal error: default blockSize is not a multiple of default bytesPerChecksum 

Stack Trace:
java.io.IOException: Internal error: default blockSize is not a multiple of default bytesPerChecksum 
	at org.apache.hadoop.fs.AbstractFileSystem.create(AbstractFileSystem.java:506)
	at org.apache.hadoop.fs.FileContext$2.next(FileContext.java:576)
	at org.apache.hadoop.fs.FileContext$2.next(FileContext.java:573)
	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2215)
	at org.apache.hadoop.fs.FileContext.create(FileContext.java:573)
	at org.apache.hadoop.fs.TestFiRename.createFile(TestFiRename.java:141)
	at org.apache.hadoop.fs.TestFiRename.testFailureNonExistentDst(TestFiRename.java:152)


REGRESSION:  org.apache.hadoop.fs.TestFiRename.testFailuresExistingDst

Error Message:
Internal error: default blockSize is not a multiple of default bytesPerChecksum 

Stack Trace:
java.io.IOException: Internal error: default blockSize is not a multiple of default bytesPerChecksum 
	at org.apache.hadoop.fs.AbstractFileSystem.create(AbstractFileSystem.java:506)
	at org.apache.hadoop.fs.FileContext$2.next(FileContext.java:576)
	at org.apache.hadoop.fs.FileContext$2.next(FileContext.java:573)
	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2215)
	at org.apache.hadoop.fs.FileContext.create(FileContext.java:573)
	at org.apache.hadoop.fs.TestFiRename.createFile(TestFiRename.java:141)
	at org.apache.hadoop.fs.TestFiRename.testFailuresExistingDst(TestFiRename.java:168)


REGRESSION:  org.apache.hadoop.fs.TestFiRename.testDeletionOfDstFile

Error Message:
Internal error: default blockSize is not a multiple of default bytesPerChecksum 

Stack Trace:
java.io.IOException: Internal error: default blockSize is not a multiple of default bytesPerChecksum 
	at org.apache.hadoop.fs.AbstractFileSystem.create(AbstractFileSystem.java:506)
	at org.apache.hadoop.fs.FileContext$2.next(FileContext.java:576)
	at org.apache.hadoop.fs.FileContext$2.next(FileContext.java:573)
	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2215)
	at org.apache.hadoop.fs.FileContext.create(FileContext.java:573)
	at org.apache.hadoop.fs.TestFiRename.createFile(TestFiRename.java:141)
	at org.apache.hadoop.fs.TestFiRename.testDeletionOfDstFile(TestFiRename.java:189)
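
These are the failures attributed to HDFS-1770 in the reply above. The IOException comes from a sanity check at create() time: the default block size must be an even multiple of the default bytes-per-checksum. A hedged sketch of that kind of validation, not the exact AbstractFileSystem code:

    import java.io.IOException;

    public class BlockSizeCheck {
      static void check(long blockSize, int bytesPerChecksum)
          throws IOException {
        if (blockSize % bytesPerChecksum != 0) {
          throw new IOException("Internal error: default blockSize is not "
              + "a multiple of default bytesPerChecksum");
        }
      }

      public static void main(String[] args) throws IOException {
        check(64L * 1024 * 1024, 512);  // typical defaults: OK
        check(1000, 512);               // not a multiple: throws as above
      }
    }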




Hadoop-Hdfs-trunk - Build # 610 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/610/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 702511 lines...]
    [junit] 2011-03-18 12:32:25,462 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-18 12:32:25,462 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-18 12:32:25,564 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 33248
    [junit] 2011-03-18 12:32:25,564 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 33248: exiting
    [junit] 2011-03-18 12:32:25,565 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 33248
    [junit] 2011-03-18 12:32:25,565 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-18 12:32:25,565 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:40492, storageID=DS-1069008913-127.0.1.1-40492-1300451534737, infoPort=48021, ipcPort=33248):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-18 12:32:25,565 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-18 12:32:25,567 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-18 12:32:25,668 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-18 12:32:25,668 INFO  datanode.DataNode (DataNode.java:run(1462)) - DatanodeRegistration(127.0.0.1:40492, storageID=DS-1069008913-127.0.1.1-40492-1300451534737, infoPort=48021, ipcPort=33248):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-18 12:32:25,668 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 33248
    [junit] 2011-03-18 12:32:25,668 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-18 12:32:25,669 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-18 12:32:25,669 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-18 12:32:25,669 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-18 12:32:25,771 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-18 12:32:25,771 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2011-03-18 12:32:25,771 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-18 12:32:25,773 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 34987
    [junit] 2011-03-18 12:32:25,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 34987: exiting
    [junit] 2011-03-18 12:32:25,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 5 on 34987: exiting
    [junit] 2011-03-18 12:32:25,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 3 on 34987: exiting
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 1 on 34987: exiting
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 2 on 34987: exiting
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 7 on 34987: exiting
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 34987
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 8 on 34987: exiting
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 9 on 34987: exiting
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 6 on 34987: exiting
    [junit] 2011-03-18 12:32:25,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 4 on 34987: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.348 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 59 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw

Error Message:
65536 = numBytes < visible = 66996, r=ReplicaInPipeline, blk_-9037085292264431097_1001, TEMPORARY   getNumBytes()     = 65536   getBytesOnDisk()  = 0   getVisibleLength()= -1   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-9037085292264431097   bytesAcked=0   bytesOnDisk=0

Stack Trace:
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: 65536 = numBytes < visible = 66996, r=ReplicaInPipeline, blk_-9037085292264431097_1001, TEMPORARY
  getNumBytes()     = 65536
  getBytesOnDisk()  = 0
  getVisibleLength()= -1
  getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
  getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/tmp/blk_-9037085292264431097
  bytesAcked=0
  bytesOnDisk=0
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.convertTemporaryToRbw(FSDataset.java:1383)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.convertTemporaryToRbw(DataNode.java:2019)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.__CLR3_0_2r95sa9tep(TestTransferRbw.java:121)
	at org.apache.hadoop.hdfs.server.datanode.TestTransferRbw.testTransferRbw(TestTransferRbw.java:63)




Hadoop-Hdfs-trunk - Build # 609 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/609/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 704012 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-11592 / http-11593 / https-11594
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:11593
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.486 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.555 sec
   [cactus] Tomcat 5.x started on port [11593]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.311 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.343 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.875 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:49: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 51 minutes 16 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
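
Both assertions say the hdfsproxy authorization filter answered HTTP 200 where the tests expected 403 Forbidden, i.e. a request the tests consider unauthorized was let through. A hedged reconstruction of the kind of Cactus client-side end-method that yields "expected:<403> but was:<200>" (the real TestAuthorizationFilter internals may differ, and the Cactus WebResponse API is assumed here):

    import junit.framework.Assert;
    import org.apache.cactus.WebResponse;

    public class PathPermitAssertion {
      // Cactus invokes end<TestName>(WebResponse) on the client side after
      // the server-side test method has run.
      public void endPathPermit(WebResponse response) {
        // A 200 response fails here with "expected:<403> but was:<200>".
        Assert.assertEquals(403, response.getStatusCode());
      }
    }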




Hadoop-Hdfs-trunk - Build # 608 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/608/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 686601 lines...]
    [junit] 2011-03-16 12:31:20,198 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-16 12:31:20,299 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 33960
    [junit] 2011-03-16 12:31:20,299 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 33960: exiting
    [junit] 2011-03-16 12:31:20,300 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 33960
    [junit] 2011-03-16 12:31:20,300 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-16 12:31:20,300 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:46068, storageID=DS-1231135676-127.0.1.1-46068-1300278669382, infoPort=34367, ipcPort=33960):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-16 12:31:20,300 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-16 12:31:20,398 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-16 12:31:20,401 INFO  datanode.DataNode (DataNode.java:run(1462)) - DatanodeRegistration(127.0.0.1:46068, storageID=DS-1231135676-127.0.1.1-46068-1300278669382, infoPort=34367, ipcPort=33960):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-16 12:31:20,401 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 33960
    [junit] 2011-03-16 12:31:20,401 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-16 12:31:20,401 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-16 12:31:20,402 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-16 12:31:20,402 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-16 12:31:20,504 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-16 12:31:20,504 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 4 
    [junit] 2011-03-16 12:31:20,504 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-16 12:31:20,505 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 52365
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 5 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 6 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 9 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 4 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 3 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 1 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 2 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 52365
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 7 on 52365: exiting
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 8 on 52365: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.653 sec
    [junit] 2011-03-16 12:31:20,506 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:747: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:505: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:688: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:662: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:730: Tests failed!

Total time: 58 minutes 25 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_29

Error Message:
Forked Java VM exited abnormally. Please note the time in the report does not reflect the time until the VM exit.

Stack Trace:
junit.framework.AssertionFailedError: Forked Java VM exited abnormally. Please note the time in the report does not reflect the time until the VM exit.


REGRESSION:  org.apache.hadoop.hdfs.TestDecommission.testHostsFile

Error Message:
Problem binding to /0.0.0.0:50020 : Address already in use

Stack Trace:
java.net.BindException: Problem binding to /0.0.0.0:50020 : Address already in use
	at org.apache.hadoop.ipc.Server.bind(Server.java:221)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:310)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1515)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1578)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1521)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1488)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestDecommission.__CLR3_0_2moi8ys10t5(TestDecommission.java:378)
	at org.apache.hadoop.hdfs.TestDecommission.testHostsFile(TestDecommission.java:375)
Caused by: java.net.BindException: Address already in use
	at sun.nio.ch.Net.bind(Native Method)
	at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:126)
	at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:59)
	at org.apache.hadoop.ipc.Server.bind(Server.java:219)
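
The bind failure above is the classic symptom of a test relying on a fixed well-known port (here the DataNode IPC default, 50020) while another process on the build slave still holds it. A minimal sketch of the usual remedy, letting the OS pick an ephemeral port by binding to port 0, follows; the class name is illustrative and is not part of the Hadoop test suite.

    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    // Hypothetical helper: ask the OS for a free ephemeral port instead of
    // hard-coding 50020, so concurrent test runs cannot collide on the address.
    public class EphemeralPortExample {
        public static void main(String[] args) throws Exception {
            ServerSocket socket = new ServerSocket();
            socket.setReuseAddress(true);
            socket.bind(new InetSocketAddress("127.0.0.1", 0)); // port 0 = ephemeral
            int port = socket.getLocalPort();                   // port actually assigned
            System.out.println("bound to free port " + port);
            socket.close();
            // A MiniDFSCluster-style test would then pass "127.0.0.1:" + port into
            // its configuration rather than relying on the default IPC port.
        }
    }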




Hadoop-Hdfs-trunk - Build # 607 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/607/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 703396 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-57474 / http-57475 / https-57476
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:57475
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.476 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.34 sec
   [cactus] Tomcat 5.x started on port [57475]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.303 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.842 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 50 minutes 42 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)
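
Both failures above assert that the hdfsproxy authorization filter should reject the request with HTTP 403, yet the server answered 200; in other words, the filter let through a path the test expects to be forbidden. Expressed outside Cactus as a plain HTTP probe, the same check might look like the sketch below. The URL and path are illustrative assumptions, since the real test drives the filter inside the Cargo-managed Tomcat.

    import java.net.HttpURLConnection;
    import java.net.URL;

    // Hypothetical standalone version of the failing assertion: request a path
    // the authorization filter should forbid and verify the status code.
    public class ForbiddenPathCheck {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://localhost:57475/some/forbidden/path"); // illustrative
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            int status = conn.getResponseCode();
            conn.disconnect();
            if (status != HttpURLConnection.HTTP_FORBIDDEN) { // 403
                throw new AssertionError("expected:<403> but was:<" + status + ">");
            }
        }
    }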




Hadoop-Hdfs-trunk - Build # 606 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/606/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 696403 lines...]
    [junit] 2011-03-14 12:25:38,462 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-14 12:25:38,462 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-14 12:25:38,563 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 50062
    [junit] 2011-03-14 12:25:38,564 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 50062: exiting
    [junit] 2011-03-14 12:25:38,564 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 50062
    [junit] 2011-03-14 12:25:38,564 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-14 12:25:38,564 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-14 12:25:38,564 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:43047, storageID=DS-73264017-127.0.1.1-43047-1300105527845, infoPort=47732, ipcPort=50062):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-14 12:25:38,567 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-14 12:25:38,667 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-14 12:25:38,668 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:43047, storageID=DS-73264017-127.0.1.1-43047-1300105527845, infoPort=47732, ipcPort=50062):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-14 12:25:38,668 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 50062
    [junit] 2011-03-14 12:25:38,668 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-14 12:25:38,668 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-14 12:25:38,668 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-14 12:25:38,669 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-14 12:25:38,771 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-14 12:25:38,771 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 4 
    [junit] 2011-03-14 12:25:38,771 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 40816
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 40816: exiting
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 4 on 40816: exiting
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 9 on 40816: exiting
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 2 on 40816: exiting
    [junit] 2011-03-14 12:25:38,774 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 6 on 40816: exiting
    [junit] 2011-03-14 12:25:38,774 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 40816
    [junit] 2011-03-14 12:25:38,774 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 8 on 40816: exiting
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 1 on 40816: exiting
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 3 on 40816: exiting
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 5 on 40816: exiting
    [junit] 2011-03-14 12:25:38,773 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 7 on 40816: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.641 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 52 minutes 46 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182raa(TestBlockReport.java:451)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
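
The assertion above is thrown by waitForTempReplica, a bounded polling loop that gives up when a replica never reaches the TEMPORARY state within its deadline, so the failure may simply mean the state change lost a race on a slow build slave. A generic sketch of that wait-with-deadline pattern, with hypothetical names, is:

    // Hypothetical sketch of the pattern behind "Was waiting too long for a
    // replica to become TEMPORARY": poll a condition until a deadline passes.
    public class BoundedWait {
        interface Condition { boolean holds(); }

        static void waitFor(Condition c, long timeoutMs) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (!c.holds()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new AssertionError(
                        "Was waiting too long for a replica to become TEMPORARY");
                }
                Thread.sleep(100); // poll interval; a slow slave may need a longer deadline
            }
        }
    }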




Hadoop-Hdfs-trunk - Build # 605 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/605/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 724220 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-13 12:25:33,635 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-13 12:25:33,735 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-13 12:25:33,735 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:53216, storageID=DS-968011023-127.0.1.1-53216-1300019123122, infoPort=58954, ipcPort=58071):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-13 12:25:33,735 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 58071
    [junit] 2011-03-13 12:25:33,736 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-13 12:25:33,736 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-13 12:25:33,736 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-13 12:25:33,736 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-13 12:25:33,838 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-13 12:25:33,838 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 3 
    [junit] 2011-03-13 12:25:33,838 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-13 12:25:33,839 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 43712
    [junit] 2011-03-13 12:25:33,840 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 43712: exiting
    [junit] 2011-03-13 12:25:33,840 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 1 on 43712: exiting
    [junit] 2011-03-13 12:25:33,840 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 2 on 43712: exiting
    [junit] 2011-03-13 12:25:33,840 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 8 on 43712: exiting
    [junit] 2011-03-13 12:25:33,840 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 9 on 43712: exiting
    [junit] 2011-03-13 12:25:33,841 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 6 on 43712: exiting
    [junit] 2011-03-13 12:25:33,841 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 7 on 43712: exiting
    [junit] 2011-03-13 12:25:33,841 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 3 on 43712: exiting
    [junit] 2011-03-13 12:25:33,841 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-13 12:25:33,841 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 43712
    [junit] 2011-03-13 12:25:33,840 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 5 on 43712: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.281 sec
    [junit] 2011-03-13 12:25:33,847 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 4 on 43712: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 52 minutes 22 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182raa(TestBlockReport.java:451)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)




Hadoop-Hdfs-trunk - Build # 604 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/604/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 704509 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-12 12:34:51,531 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-12 12:34:51,631 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-12 12:34:51,631 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:58228, storageID=DS-1627632864-127.0.1.1-58228-1299933280633, infoPort=53994, ipcPort=51584):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-12 12:34:51,631 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 51584
    [junit] 2011-03-12 12:34:51,632 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-12 12:34:51,632 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-12 12:34:51,632 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-12 12:34:51,632 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-12 12:34:51,734 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-12 12:34:51,734 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-12 12:34:51,734 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 4 
    [junit] 2011-03-12 12:34:51,736 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 50545
    [junit] 2011-03-12 12:34:51,736 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 1 on 50545: exiting
    [junit] 2011-03-12 12:34:51,736 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 50545: exiting
    [junit] 2011-03-12 12:34:51,736 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 2 on 50545: exiting
    [junit] 2011-03-12 12:34:51,737 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 3 on 50545: exiting
    [junit] 2011-03-12 12:34:51,737 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 4 on 50545: exiting
    [junit] 2011-03-12 12:34:51,737 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 6 on 50545: exiting
    [junit] 2011-03-12 12:34:51,737 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 5 on 50545: exiting
    [junit] 2011-03-12 12:34:51,737 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 7 on 50545: exiting
    [junit] 2011-03-12 12:34:51,738 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 8 on 50545: exiting
    [junit] 2011-03-12 12:34:51,738 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 9 on 50545: exiting
    [junit] 2011-03-12 12:34:51,738 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 50545
    [junit] 2011-03-12 12:34:51,739 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.593 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 61 minutes 40 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182raa(TestBlockReport.java:451)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)




Hadoop-Hdfs-trunk - Build # 603 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/603/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 702030 lines...]
    [junit] 2011-03-11 12:32:49,243 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-11 12:32:49,243 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-11 12:32:49,244 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-11 12:32:49,354 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 55399
    [junit] 2011-03-11 12:32:49,355 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 55399: exiting
    [junit] 2011-03-11 12:32:49,355 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 55399
    [junit] 2011-03-11 12:32:49,355 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:48967, storageID=DS-1670833288-127.0.1.1-48967-1299846758473, infoPort=46766, ipcPort=55399):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-11 12:32:49,355 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-11 12:32:49,356 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-11 12:32:49,456 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-11 12:32:49,456 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:48967, storageID=DS-1670833288-127.0.1.1-48967-1299846758473, infoPort=46766, ipcPort=55399):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-11 12:32:49,457 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 55399
    [junit] 2011-03-11 12:32:49,457 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-11 12:32:49,457 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-11 12:32:49,457 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-11 12:32:49,458 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-11 12:32:49,559 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-11 12:32:49,559 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 2 
    [junit] 2011-03-11 12:32:49,559 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-11 12:32:49,561 INFO  ipc.Server (Server.java:stop(1624)) - Stopping server on 51176
    [junit] 2011-03-11 12:32:49,561 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 0 on 51176: exiting
    [junit] 2011-03-11 12:32:49,561 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 1 on 51176: exiting
    [junit] 2011-03-11 12:32:49,562 INFO  ipc.Server (Server.java:run(689)) - Stopping IPC Server Responder
    [junit] 2011-03-11 12:32:49,562 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 5 on 51176: exiting
    [junit] 2011-03-11 12:32:49,562 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 51176
    [junit] 2011-03-11 12:32:49,562 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 3 on 51176: exiting
    [junit] 2011-03-11 12:32:49,562 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 4 on 51176: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.441 sec
    [junit] 2011-03-11 12:32:49,570 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 2 on 51176: exiting
    [junit] 2011-03-11 12:32:49,570 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 8 on 51176: exiting
    [junit] 2011-03-11 12:32:49,570 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 7 on 51176: exiting
    [junit] 2011-03-11 12:32:49,570 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 6 on 51176: exiting
    [junit] 2011-03-11 12:32:49,570 INFO  ipc.Server (Server.java:run(1457)) - IPC Server handler 9 on 51176: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 59 minutes 54 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jr9p(TestBlockReport.java:408)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
127.0.0.1:37712 is not an underUtilized node

Stack Trace:
junit.framework.AssertionFailedError: 127.0.0.1:37712 is not an underUtilized node
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:1011)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:953)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1496)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5bsqr(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)
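
The balancer failure above means a datanode the test expected to be classified as under-utilized was not. The classification itself is simple arithmetic: a node counts as under-utilized when its utilization sits more than the configured threshold below the cluster average. A hedged sketch, with illustrative names and values:

    // Illustrative arithmetic behind the balancer's node classification: a node
    // is under-utilized when it is more than thresholdPct percentage points
    // below the cluster-average utilization.
    public class BalancerClassification {
        static boolean isUnderUtilized(double nodeUtilizationPct,
                                       double clusterAvgPct,
                                       double thresholdPct) {
            return nodeUtilizationPct < clusterAvgPct - thresholdPct;
        }

        public static void main(String[] args) {
            // Example: node at 10% used, cluster average 40%, threshold 10
            // => 10 < 30, so the node should be picked up as under-utilized.
            System.out.println(isUnderUtilized(10.0, 40.0, 10.0)); // true
        }
    }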




Hadoop-Hdfs-trunk - Build # 602 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/602/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 712409 lines...]
    [junit] 2011-03-10 12:34:33,978 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-10 12:34:33,978 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-10 12:34:33,978 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-10 12:34:34,080 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 44610
    [junit] 2011-03-10 12:34:34,080 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 44610: exiting
    [junit] 2011-03-10 12:34:34,080 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 44610
    [junit] 2011-03-10 12:34:34,081 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:54622, storageID=DS-1811748547-127.0.1.1-54622-1299760463191, infoPort=37842, ipcPort=44610):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-10 12:34:34,081 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-10 12:34:34,080 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-03-10 12:34:34,181 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-10 12:34:34,182 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:54622, storageID=DS-1811748547-127.0.1.1-54622-1299760463191, infoPort=37842, ipcPort=44610):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-10 12:34:34,182 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 44610
    [junit] 2011-03-10 12:34:34,182 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-10 12:34:34,182 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-10 12:34:34,182 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-10 12:34:34,183 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-10 12:34:34,288 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-10 12:34:34,288 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-10 12:34:34,289 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 2 
    [junit] 2011-03-10 12:34:34,290 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 34507
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 34507
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 34507: exiting
    [junit] 2011-03-10 12:34:34,291 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 34507: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.752 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 61 minutes 24 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
127.0.0.1:44347 is not an underUtilized node

Stack Trace:
junit.framework.AssertionFailedError: 127.0.0.1:44347 is not an underUtilized node
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:1011)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:953)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1496)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5bsqp(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)




Hadoop-Hdfs-trunk - Build # 601 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/601/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 709524 lines...]
    [junit] 2011-03-09 12:34:57,741 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-09 12:34:57,741 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-09 12:34:57,842 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 59292
    [junit] 2011-03-09 12:34:57,843 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 59292: exiting
    [junit] 2011-03-09 12:34:57,843 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 59292
    [junit] 2011-03-09 12:34:57,843 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-09 12:34:57,843 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:53741, storageID=DS-820295002-127.0.1.1-53741-1299674087032, infoPort=36626, ipcPort=59292):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-09 12:34:57,843 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-03-09 12:34:57,845 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-09 12:34:57,946 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-09 12:34:57,946 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:53741, storageID=DS-820295002-127.0.1.1-53741-1299674087032, infoPort=36626, ipcPort=59292):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-09 12:34:57,946 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 59292
    [junit] 2011-03-09 12:34:57,946 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-09 12:34:57,947 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-09 12:34:57,947 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-09 12:34:57,947 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-09 12:34:58,049 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-09 12:34:58,049 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2011-03-09 12:34:58,049 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-09 12:34:58,050 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 34632
    [junit] 2011-03-09 12:34:58,050 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 34632: exiting
    [junit] 2011-03-09 12:34:58,051 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 34632: exiting
    [junit] 2011-03-09 12:34:58,051 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-03-09 12:34:58,050 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 34632: exiting
    [junit] 2011-03-09 12:34:58,050 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 34632: exiting
    [junit] 2011-03-09 12:34:58,050 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 34632: exiting
    [junit] 2011-03-09 12:34:58,051 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 34632
    [junit] 2011-03-09 12:34:58,051 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 34632: exiting
    [junit] 2011-03-09 12:34:58,051 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 34632: exiting
    [junit] 2011-03-09 12:34:58,051 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 34632: exiting
    [junit] 2011-03-09 12:34:58,052 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 34632: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.812 sec
    [junit] 2011-03-09 12:34:58,055 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 34632: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:749: Tests failed!

Total time: 61 minutes 34 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.namenode.TestNodeCount.testNodeCount

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.namenode.BlockManager.countNodes(BlockManager.java:1431)
	at org.apache.hadoop.hdfs.server.namenode.TestNodeCount.__CLR3_0_29bdgm6s8c(TestNodeCount.java:90)
	at org.apache.hadoop.hdfs.server.namenode.TestNodeCount.testNodeCount(TestNodeCount.java:40)




Hadoop-Hdfs-trunk - Build # 600 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/600/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 711758 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-14315 / http-14316 / https-14317
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:14316
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.514 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.333 sec
   [cactus] Tomcat 5.x started on port [14316]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.323 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.831 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:753: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:734: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 52 minutes 8 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 599 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/599/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 719738 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-63314 / http-63315 / https-63316
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:63315
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.457 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.316 sec
   [cactus] Tomcat 5.x started on port [63315]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.329 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.33 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.825 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 52 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 598 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/598/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 704399 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-26393 / http-26394 / https-26395
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:26394
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.461 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.343 sec
   [cactus] Tomcat 5.x started on port [26394]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.315 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.328 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.825 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 52 minutes 23 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 597 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/597/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 702068 lines...]
    [junit] 2011-03-05 15:57:50,148 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-05 15:57:50,149 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-05 15:57:50,149 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-03-05 15:57:50,250 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 32794
    [junit] 2011-03-05 15:57:50,251 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 32794: exiting
    [junit] 2011-03-05 15:57:50,251 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 32794
    [junit] 2011-03-05 15:57:50,251 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:39757, storageID=DS-1671495469-127.0.1.1-39757-1299340659374, infoPort=41007, ipcPort=32794):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-03-05 15:57:50,251 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-03-05 15:57:50,251 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-03-05 15:57:50,354 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-05 15:57:50,354 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:39757, storageID=DS-1671495469-127.0.1.1-39757-1299340659374, infoPort=41007, ipcPort=32794):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-05 15:57:50,354 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 32794
    [junit] 2011-03-05 15:57:50,354 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-05 15:57:50,355 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-05 15:57:50,355 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-05 15:57:50,355 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-05 15:57:50,457 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-05 15:57:50,457 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 4 
    [junit] 2011-03-05 15:57:50,457 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-05 15:57:50,458 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 41611
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 41611: exiting
    [junit] 2011-03-05 15:57:50,459 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 41611: exiting
    [junit] 2011-03-05 15:57:50,460 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 41611
    [junit] 2011-03-05 15:57:50,460 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-03-05 15:57:50,461 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 41611: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.777 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 105 minutes 59 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testErrorReplicas

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.




Hadoop-Hdfs-trunk - Build # 596 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/596/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 741127 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-52382 / http-52383 / https-52384
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:52383
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.45 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.343 sec
   [cactus] Tomcat 5.x started on port [52383]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.342 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.321 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.838 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 61 minutes 42 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 595 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/595/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 740316 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-03-03 12:26:37,364 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-03 12:26:37,464 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-03-03 12:26:37,464 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:39230, storageID=DS-147357001-127.0.1.1-39230-1299155186617, infoPort=40649, ipcPort=49091):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-03-03 12:26:37,464 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 49091
    [junit] 2011-03-03 12:26:37,465 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-03-03 12:26:37,465 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-03-03 12:26:37,465 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-03-03 12:26:37,466 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-03-03 12:26:37,567 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2854)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-03 12:26:37,567 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-03-03 12:26:37,568 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 3 
    [junit] 2011-03-03 12:26:37,569 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 33095
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 33095: exiting
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 33095: exiting
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 33095: exiting
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 33095: exiting
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 33095: exiting
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 33095: exiting
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 33095
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 33095: exiting
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 33095: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.469 sec
    [junit] 2011-03-03 12:26:37,570 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 33095: exiting
    [junit] 2011-03-03 12:26:37,571 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 33095: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 52 minutes 50 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182rac(TestBlockReport.java:451)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)




Hadoop-Hdfs-trunk - Build # 594 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/594/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 719188 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-41970 / http-41971 / https-41972
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:41971
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.624 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.345 sec
   [cactus] Tomcat 5.x started on port [41971]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.321 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.857 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 60 minutes 19 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 593 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/593/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 718385 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-56330 / http-56331 / https-56332
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:56331
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.466 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.351 sec
   [cactus] Tomcat 5.x started on port [56331]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.332 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.342 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.886 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 51 minutes 22 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 592 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/592/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 716911 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-39686 / http-39687 / https-39688
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:39687
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.451 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.355 sec
   [cactus] Tomcat 5.x started on port [39687]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.31 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.316 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.878 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 61 minutes 19 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Re: Fwd: Hadoop-Hdfs-trunk - Build # 591 - Still Failing

Posted by Konstantin Boudnik <co...@apache.org>.
I have taken a look at the tests and I should note that the way they are
written is kinda misleading. For instance, the message we are seeing in
Hudson says
  expected:<403> but was:<200>

whereas in reality the expected value was <200> and the actual value was <403>.
Basically the order of the assert arguments is reversed in a number of places.
While this isn't the cause of the failure, it does confuse the analysis.
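
To illustrate the reversed-argument pattern, here is a minimal, hypothetical
sketch (the class and variable names are illustrative, not taken from
TestAuthorizationFilter); run with JUnit 3 on the classpath, it fails with
exactly the misleading message above:

    import junit.framework.TestCase;

    public class AssertOrderExample extends TestCase {
        public void testReversedArguments() {
            int expectedStatus = 200; // the test means to require HTTP 200
            int actualStatus = 403;   // what the filter actually returned

            // JUnit's assertEquals(expected, actual) prints its first
            // argument as "expected". Passing the actual value first yields
            // the misleading "expected:<403> but was:<200>":
            assertEquals(actualStatus, expectedStatus);

            // The correct order would instead fail with
            // "expected:<200> but was:<403>":
            // assertEquals(expectedStatus, actualStatus);
        }
    }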

It'd be great to see a maintainer of this component take a look at the
failures so we can eventually have a green HDFS build.

I have opened https://issues.apache.org/jira/browse/HDFS-1666 to track it

Cos

On Thu, Feb 24, 2011 at 10:14AM, Todd Lipcon wrote:
> Can someone familiar with hdfsproxy look into this consistent unit test
> failure? People voted in support of keeping this contrib, but it would be
> easier to be satisfied with that decision if someone stepped up to fix these
> tests that have been failing for quite some time.
> 
> -Todd

Fwd: Hadoop-Hdfs-trunk - Build # 591 - Still Failing

Posted by Todd Lipcon <to...@cloudera.com>.
Can someone familiar with hdfsproxy look into this consistent unit test
failure? People voted in support of keeping this contrib, but it would be
easier to be satisfied with that decision if someone stepped up to fix these
tests that have been failing for quite some time.

-Todd

---------- Forwarded message ----------
From: Apache Hudson Server <hu...@hudson.apache.org>
Date: Thu, Feb 24, 2011 at 4:36 AM
Subject: Hadoop-Hdfs-trunk - Build # 591 - Still Failing
To: hdfs-dev@hadoop.apache.org


See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/591/


-- 
Todd Lipcon
Software Engineer, Cloudera

Hadoop-Hdfs-trunk - Build # 591 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/591/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 719693 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-57271 / http-57272 / https-57273
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:57272
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.454 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tomcat 5.x started on port [57272]
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.32 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.347 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.307 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 1.024 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 59 minutes 43 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 590 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/590/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 735848 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-13253 / http-13254 / https-13255
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:13254
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.477 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.341 sec
   [cactus] Tomcat 5.x started on port [13254]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.323 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.319 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.806 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 61 minutes 19 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 589 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/589/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 703645 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-45766 / http-45767 / https-45768
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:45767
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.476 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.315 sec
   [cactus] Tomcat 5.x started on port [45767]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.354 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.322 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.825 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 51 minutes 19 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 588 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/588/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 706271 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-45613 / http-45614 / https-45615
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:45614
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.443 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.338 sec
   [cactus] Tomcat 5.x started on port [45614]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.312 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.334 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.867 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 61 minutes 2 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 587 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/587/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 708813 lines...]
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target
     [echo]  Including clover.jar in the war file ...
[cactifywar] Analyzing war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/hdfsproxy-2.0-test.war
[cactifywar] Building war: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war

cactifywar:

test-cactus:
     [echo]  Free Ports: startup-22991 / http-22992 / https-22993
     [echo] Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/temp
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/logs
    [mkdir] Created dir: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/reports
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
     [copy] Copying 1 file to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/conf
   [cactus] -----------------------------------------------------------------
   [cactus] Running tests against Tomcat 5.x @ http://localhost:22992
   [cactus] -----------------------------------------------------------------
   [cactus] Deploying [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/test.war] to [/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/contrib/hdfsproxy/target/tomcat-config/webapps]...
   [cactus] Tomcat 5.x starting...
Server [Apache-Coyote/1.1] started
   [cactus] WARNING: multiple versions of ant detected in path for junit 
   [cactus]          jar:file:/homes/hudson/tools/ant/latest/lib/ant.jar!/org/apache/tools/ant/Project.class
   [cactus]      and jar:file:/homes/hudson/.ivy2/cache/ant/ant/jars/ant-1.6.5.jar!/org/apache/tools/ant/Project.class
   [cactus] Running org.apache.hadoop.hdfsproxy.TestAuthorizationFilter
   [cactus] Tests run: 4, Failures: 2, Errors: 0, Time elapsed: 0.465 sec
   [cactus] Test org.apache.hadoop.hdfsproxy.TestAuthorizationFilter FAILED
   [cactus] Running org.apache.hadoop.hdfsproxy.TestLdapIpDirFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.524 sec
   [cactus] Tomcat 5.x started on port [22992]
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyFilter
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.361 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyForwardServlet
   [cactus] Tests run: 2, Failures: 0, Errors: 0, Time elapsed: 0.325 sec
   [cactus] Running org.apache.hadoop.hdfsproxy.TestProxyUtil
   [cactus] Tests run: 1, Failures: 0, Errors: 0, Time elapsed: 0.899 sec
   [cactus] Tomcat 5.x is stopping...
   [cactus] Tomcat 5.x is stopped

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:750: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:731: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/build.xml:48: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/contrib/hdfsproxy/build.xml:343: Tests failed!

Total time: 51 minutes 21 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermit

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermit(TestAuthorizationFilter.java:113)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)


FAILED:  org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.testPathPermitQualified

Error Message:
expected:<403> but was:<200>

Stack Trace:
junit.framework.AssertionFailedError: expected:<403> but was:<200>
	at org.apache.hadoop.hdfsproxy.TestAuthorizationFilter.endPathPermitQualified(TestAuthorizationFilter.java:136)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callGenericEndMethod(ClientTestCaseCaller.java:442)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.callEndMethod(ClientTestCaseCaller.java:209)
	at org.apache.cactus.internal.client.ClientTestCaseCaller.runTest(ClientTestCaseCaller.java:149)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBareClient(AbstractCactusTestCase.java:218)
	at org.apache.cactus.internal.AbstractCactusTestCase.runBare(AbstractCactusTestCase.java:134)




Hadoop-Hdfs-trunk - Build # 586 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/586/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 647441 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-02-18 12:33:05,487 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-18 12:33:05,587 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-18 12:33:05,587 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:59724, storageID=DS-1057536237-127.0.1.1-59724-1298032374660, infoPort=45229, ipcPort=44897):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-18 12:33:05,588 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 44897
    [junit] 2011-02-18 12:33:05,588 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-18 12:33:05,588 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-18 12:33:05,588 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-18 12:33:05,589 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-18 12:33:05,690 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-18 12:33:05,690 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2011-02-18 12:33:05,690 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-18 12:33:05,692 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 46365
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 46365
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 46365: exiting
    [junit] 2011-02-18 12:33:05,693 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 46365: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.378 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 60 minutes 6 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestHFlush.testHFlushInterrupted

Error Message:
End of file reached before reading fully.

Stack Trace:
java.io.EOFException: End of file reached before reading fully.
	at org.apache.hadoop.fs.FSInputStream.readFully(FSInputStream.java:73)
	at org.apache.hadoop.fs.FSDataInputStream.readFully(FSDataInputStream.java:61)
	at org.apache.hadoop.hdfs.AppendTestUtil.checkFullFile(AppendTestUtil.java:159)
	at org.apache.hadoop.hdfs.TestHFlush.__CLR3_0_213w22tpd8(TestHFlush.java:273)
	at org.apache.hadoop.hdfs.TestHFlush.testHFlushInterrupted(TestHFlush.java:216)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:69)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:316)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1513)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
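
The three failures above form a cascade. The first test's setUp() dies while opening an epoll Selector for the DataNode's IPC server, the same frames that carry "Too many open files" in the later builds below, so it is almost certainly the build slave running out of per-process file descriptors. Because setUp() aborts before MiniDFSCluster.shutdown() can run, the half-started NameNode keeps its lock on the in_use.lock file under .../dfs/name1, and the next two tests then fail in setUp() with "Cannot lock storage ... already locked." A defensive sketch of the test-side pattern, assuming a JUnit-3-style TestCase as the stack suggests (the class name ClusterTestBase and the field are illustrative, not the actual TestFileConcurrentReader code):

    import junit.framework.TestCase;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public abstract class ClusterTestBase extends TestCase {
      protected MiniDFSCluster cluster;

      protected void setUp() throws Exception {
        Configuration conf = new HdfsConfiguration();
        try {
          cluster = new MiniDFSCluster.Builder(conf).build();
          cluster.waitActive();
        } catch (Exception e) {
          // JUnit 3 does not call tearDown() after a failed setUp(), so
          // release whatever was started before rethrowing.
          if (cluster != null) {
            cluster.shutdown();
          }
          throw e;
        }
      }

      protected void tearDown() throws Exception {
        if (cluster != null) {
          cluster.shutdown();  // releases the storage directory locks
          cluster = null;
        }
      }
    }

Note that when Builder.build() itself dies mid-initialization, the lock is held inside the partially constructed cluster where the test cannot reach it, so a complete fix also needs MiniDFSCluster to tear itself down on a failed start.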




Hadoop-Hdfs-trunk - Build # 585 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/585/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 650617 lines...]
    [junit] 2011-02-17 12:32:38,770 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-17 12:32:38,770 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-17 12:32:38,872 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 43514
    [junit] 2011-02-17 12:32:38,872 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 43514: exiting
    [junit] 2011-02-17 12:32:38,872 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 43514
    [junit] 2011-02-17 12:32:38,872 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-02-17 12:32:38,872 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-17 12:32:38,873 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:37431, storageID=DS-1469273064-127.0.1.1-37431-1297945948029, infoPort=39656, ipcPort=43514):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-02-17 12:32:38,875 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-17 12:32:38,975 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-17 12:32:38,976 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:37431, storageID=DS-1469273064-127.0.1.1-37431-1297945948029, infoPort=39656, ipcPort=43514):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-17 12:32:38,976 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 43514
    [junit] 2011-02-17 12:32:38,976 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-17 12:32:38,976 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-17 12:32:38,976 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-17 12:32:38,977 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-17 12:32:39,078 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-17 12:32:39,078 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-17 12:32:39,078 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-02-17 12:32:39,080 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 54471
    [junit] 2011-02-17 12:32:39,080 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 54471: exiting
    [junit] 2011-02-17 12:32:39,080 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 54471: exiting
    [junit] 2011-02-17 12:32:39,080 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 54471: exiting
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 54471: exiting
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 54471
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 54471: exiting
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 54471: exiting
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 54471: exiting
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 54471: exiting
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 54471: exiting
    [junit] 2011-02-17 12:32:39,081 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 54471: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.473 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 59 minutes 25 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/VERSION (Too many open files)

Stack Trace:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/VERSION (Too many open files)
	at java.io.RandomAccessFile.open(Native Method)
	at java.io.RandomAccessFile.<init>(RandomAccessFile.java:212)
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.write(Storage.java:265)
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.write(Storage.java:259)
	at org.apache.hadoop.hdfs.server.common.Storage.writeAll(Storage.java:806)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.register(DataNode.java:714)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.runDatanodeDaemon(DataNode.java:1470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:692)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:162)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:170)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:449)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:772)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:119)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:235)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:284)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:162)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:170)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:449)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:772)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:119)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:235)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:284)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)
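
All three regressions in this build are the same per-process file-descriptor limit being hit; the VERSION write and the hdfs-default.xml reload are simply whichever open() came after exhaustion. A quick Linux-only probe that can be dropped into a suspect test run to watch the descriptor count grow across cases (assumes /proc is mounted; not part of the test suite):

    import java.io.File;

    public class FdCount {
      /** Descriptors currently held by this JVM, or -1 where /proc is absent. */
      public static int openFds() {
        String[] entries = new File("/proc/self/fd").list();
        return entries == null ? -1 : entries.length;
      }

      public static void main(String[] args) {
        System.out.println("open fds: " + openFds());
      }
    }

Logging this at the start of each test case makes a leak show up as a monotonically growing count long before the hard limit turns into failures like the ones above.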




Hadoop-Hdfs-trunk - Build # 584 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/584/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 694186 lines...]
    [junit] 2011-02-16 12:33:33,046 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-16 12:33:33,046 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-16 12:33:33,048 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 45109
    [junit] 2011-02-16 12:33:33,048 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 45109: exiting
    [junit] 2011-02-16 12:33:33,049 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 45109
    [junit] 2011-02-16 12:33:33,049 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-16 12:33:33,049 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:40341, storageID=DS-622966542-127.0.1.1-40341-1297859602463, infoPort=55796, ipcPort=45109):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-02-16 12:33:33,049 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-02-16 12:33:33,051 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-16 12:33:33,152 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-16 12:33:33,152 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:40341, storageID=DS-622966542-127.0.1.1-40341-1297859602463, infoPort=55796, ipcPort=45109):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-16 12:33:33,152 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 45109
    [junit] 2011-02-16 12:33:33,152 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-16 12:33:33,153 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-16 12:33:33,153 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-16 12:33:33,153 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-16 12:33:33,255 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-16 12:33:33,255 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-16 12:33:33,256 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 4 
    [junit] 2011-02-16 12:33:33,257 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 39710
    [junit] 2011-02-16 12:33:33,258 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 39710: exiting
    [junit] 2011-02-16 12:33:33,258 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 39710: exiting
    [junit] 2011-02-16 12:33:33,258 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 39710: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.347 sec
    [junit] 2011-02-16 12:33:33,258 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-02-16 12:33:33,269 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 39710: exiting
    [junit] 2011-02-16 12:33:33,258 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 39710: exiting
    [junit] 2011-02-16 12:33:33,258 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 39710: exiting
    [junit] 2011-02-16 12:33:33,270 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 39710: exiting
    [junit] 2011-02-16 12:33:33,258 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 39710
    [junit] 2011-02-16 12:33:33,270 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 39710: exiting
    [junit] 2011-02-16 12:33:33,270 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 39710: exiting
    [junit] 2011-02-16 12:33:33,269 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 39710: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 60 minutes 33 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182r9s(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
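
Unlike the descriptor failures, blockReport_09 is a timing assertion: the test samples the PendingReplication count once, racing against the NameNode's asynchronous processing of the block report, so "expected:<2> but was:<1>" can simply mean the second block had not been queued yet. One hedged way to make such a check robust is to poll instead of sampling once; the helper below is invented for illustration, and getPendingReplicationBlocks() is assumed from the FSNamesystem metrics bean:

    // Drop into the test class; FSNamesystem is
    // org.apache.hadoop.hdfs.server.namenode.FSNamesystem.
    static void waitForPendingReplication(FSNamesystem ns, long expected,
        long timeoutMs) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (ns.getPendingReplicationBlocks() != expected) {
        if (System.currentTimeMillis() > deadline) {
          junit.framework.Assert.fail("Wrong number of PendingReplication"
              + " blocks expected:<" + expected + "> but was:<"
              + ns.getPendingReplicationBlocks() + ">");
        }
        Thread.sleep(100);
      }
    }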




Hadoop-Hdfs-trunk - Build # 583 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/583/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 681243 lines...]
    [junit] 2011-02-15 12:22:31,972 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-15 12:22:31,973 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-15 12:22:32,074 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 50629
    [junit] 2011-02-15 12:22:32,074 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 50629: exiting
    [junit] 2011-02-15 12:22:32,074 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 50629
    [junit] 2011-02-15 12:22:32,074 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-15 12:22:32,075 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:60243, storageID=DS-121796189-127.0.1.1-60243-1297772541242, infoPort=50058, ipcPort=50629):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-02-15 12:22:32,074 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] 2011-02-15 12:22:32,077 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-15 12:22:32,177 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-15 12:22:32,178 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:60243, storageID=DS-121796189-127.0.1.1-60243-1297772541242, infoPort=50058, ipcPort=50629):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-15 12:22:32,178 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 50629
    [junit] 2011-02-15 12:22:32,178 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-15 12:22:32,178 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-15 12:22:32,178 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-15 12:22:32,179 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-15 12:22:32,189 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-15 12:22:32,189 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-15 12:22:32,189 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2011-02-15 12:22:32,190 INFO  ipc.Server (Server.java:stop(1622)) - Stopping server on 41075
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 0 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 7 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 5 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 1 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 2 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 9 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 8 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 6 on 41075: exiting
    [junit] 2011-02-15 12:22:32,191 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 4 on 41075: exiting
    [junit] 2011-02-15 12:22:32,192 INFO  ipc.Server (Server.java:run(1455)) - IPC Server handler 3 on 41075: exiting
    [junit] 2011-02-15 12:22:32,194 INFO  ipc.Server (Server.java:run(485)) - Stopping IPC Server listener on 41075
    [junit] 2011-02-15 12:22:32,194 INFO  ipc.Server (Server.java:run(687)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.355 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 49 minutes 22 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:614)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1522)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
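
The "Cannot lock storage ... already locked" message is the storage-directory lock file at work: the NameNode takes an exclusive java.nio FileLock on in_use.lock inside each storage directory, and a second attempt from the same JVM, here a fresh MiniDFSCluster formatting over one leaked by the earlier failure, cannot acquire it. A standalone sketch of that mechanism (the lock-file name and "rws" mode are read off the Storage$StorageDirectory.lock frame and should be treated as assumptions):

    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    public class LockDemo {
      public static void main(String[] args) throws Exception {
        RandomAccessFile raf = new RandomAccessFile("/tmp/in_use.lock", "rws");
        FileLock first = raf.getChannel().tryLock();  // held, like the leaked cluster's
        try {
          // A second attempt from the same JVM fails immediately:
          FileLock second = raf.getChannel().tryLock();
          System.out.println(second == null ? "already locked" : "unexpected");
        } catch (OverlappingFileLockException e) {
          // Same-JVM overlap throws rather than returning null.
          System.out.println("already locked (same JVM): " + e);
        } finally {
          if (first != null) {
            first.release();
          }
          raf.close();
        }
      }
    }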




Hadoop-Hdfs-trunk - Build # 582 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/582/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 694693 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-02-14 12:23:40,358 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-14 12:23:40,459 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-14 12:23:40,459 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:51763, storageID=DS-1581103133-127.0.1.1-51763-1297686209769, infoPort=59454, ipcPort=44458):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-14 12:23:40,459 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 44458
    [junit] 2011-02-14 12:23:40,459 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-14 12:23:40,460 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-14 12:23:40,460 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-14 12:23:40,460 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-14 12:23:40,462 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-14 12:23:40,462 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2011-02-14 12:23:40,463 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-14 12:23:40,464 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 42776
    [junit] 2011-02-14 12:23:40,464 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 42776: exiting
    [junit] 2011-02-14 12:23:40,464 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 42776: exiting
    [junit] 2011-02-14 12:23:40,464 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 42776: exiting
    [junit] 2011-02-14 12:23:40,464 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 42776: exiting
    [junit] 2011-02-14 12:23:40,465 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 42776: exiting
    [junit] 2011-02-14 12:23:40,465 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 42776: exiting
    [junit] 2011-02-14 12:23:40,465 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-14 12:23:40,465 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 42776
    [junit] 2011-02-14 12:23:40,465 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 42776: exiting
    [junit] 2011-02-14 12:23:40,465 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 42776: exiting
    [junit] 2011-02-14 12:23:40,464 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 42776: exiting
    [junit] 2011-02-14 12:23:40,464 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 42776: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 34.983 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 50 minutes 37 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jr97(TestBlockReport.java:414)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)




Hadoop-Hdfs-trunk - Build # 581 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/581/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 653948 lines...]
    [junit] 2011-02-13 12:24:37,445 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-13 12:24:37,445 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-13 12:24:37,547 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 49354
    [junit] 2011-02-13 12:24:37,547 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 49354: exiting
    [junit] 2011-02-13 12:24:37,547 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-13 12:24:37,547 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:51501, storageID=DS-860255907-127.0.1.1-51501-1297599866704, infoPort=46020, ipcPort=49354):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-02-13 12:24:37,547 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-13 12:24:37,547 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 49354
    [junit] 2011-02-13 12:24:37,550 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-13 12:24:37,550 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-13 12:24:37,551 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:51501, storageID=DS-860255907-127.0.1.1-51501-1297599866704, infoPort=46020, ipcPort=49354):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-13 12:24:37,551 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 49354
    [junit] 2011-02-13 12:24:37,551 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-13 12:24:37,551 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-13 12:24:37,551 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-13 12:24:37,552 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-13 12:24:37,653 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-13 12:24:37,653 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 12 2 
    [junit] 2011-02-13 12:24:37,654 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-13 12:24:37,655 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 53844
    [junit] 2011-02-13 12:24:37,655 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 53844: exiting
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 53844: exiting
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 53844: exiting
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 53844: exiting
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 53844: exiting
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 53844: exiting
    [junit] 2011-02-13 12:24:37,657 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 53844: exiting
    [junit] 2011-02-13 12:24:37,657 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 53844: exiting
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53844
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 53844: exiting
    [junit] 2011-02-13 12:24:37,656 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 53844: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.286 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 51 minutes 30 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1510)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tro5(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)




Hadoop-Hdfs-trunk - Build # 580 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/580/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 673325 lines...]
    [junit] 2011-02-12 13:09:09,046 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-12 13:09:09,047 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-12 13:09:09,047 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-12 13:09:09,148 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 58457
    [junit] 2011-02-12 13:09:09,149 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 58457: exiting
    [junit] 2011-02-12 13:09:09,149 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 58457
    [junit] 2011-02-12 13:09:09,149 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-12 13:09:09,150 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:43037, storageID=DS-1359992697-127.0.1.1-43037-1297516138283, infoPort=53748, ipcPort=58457):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:159)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:662)
    [junit] 
    [junit] 2011-02-12 13:09:09,150 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-12 13:09:09,250 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-12 13:09:09,251 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:43037, storageID=DS-1359992697-127.0.1.1-43037-1297516138283, infoPort=53748, ipcPort=58457):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-12 13:09:09,251 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 58457
    [junit] 2011-02-12 13:09:09,251 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-12 13:09:09,251 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-12 13:09:09,252 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-12 13:09:09,252 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-12 13:09:09,358 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-12 13:09:09,358 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-12 13:09:09,358 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-02-12 13:09:09,362 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 39861
    [junit] 2011-02-12 13:09:09,362 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 39861: exiting
    [junit] 2011-02-12 13:09:09,362 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-12 13:09:09,362 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 39861
    [junit] 2011-02-12 13:09:09,362 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 39861: exiting
    [junit] 2011-02-12 13:09:09,363 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 39861: exiting
    [junit] 2011-02-12 13:09:09,363 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 39861: exiting
    [junit] 2011-02-12 13:09:09,363 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 39861: exiting
    [junit] 2011-02-12 13:09:09,362 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 39861: exiting
    [junit] 2011-02-12 13:09:09,362 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 39861: exiting
    [junit] 2011-02-12 13:09:09,363 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 39861: exiting
    [junit] 2011-02-12 13:09:09,363 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 39861: exiting
    [junit] 2011-02-12 13:09:09,363 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 39861: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.694 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 97 minutes 2 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testErrorReplicas

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.




Hadoop-Hdfs-trunk - Build # 579 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/579/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 667231 lines...]
    [junit] 2011-02-11 12:47:50,951 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-11 12:47:50,952 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-11 12:47:51,061 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 33356
    [junit] 2011-02-11 12:47:51,062 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 33356: exiting
    [junit] 2011-02-11 12:47:51,062 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-11 12:47:51,062 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:32838, storageID=DS-1763883780-127.0.1.1-32838-1297428460026, infoPort=54976, ipcPort=33356):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-11 12:47:51,062 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-11 12:47:51,062 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 33356
    [junit] 2011-02-11 12:47:51,065 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-11 12:47:51,165 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-11 12:47:51,166 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:32838, storageID=DS-1763883780-127.0.1.1-32838-1297428460026, infoPort=54976, ipcPort=33356):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-11 12:47:51,166 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 33356
    [junit] 2011-02-11 12:47:51,166 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-11 12:47:51,167 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-11 12:47:51,167 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-11 12:47:51,169 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-11 12:47:51,271 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-11 12:47:51,271 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2011-02-11 12:47:51,271 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-11 12:47:51,272 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 57762
    [junit] 2011-02-11 12:47:51,273 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 57762: exiting
    [junit] 2011-02-11 12:47:51,273 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 57762: exiting
    [junit] 2011-02-11 12:47:51,273 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 57762: exiting
    [junit] 2011-02-11 12:47:51,273 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 57762: exiting
    [junit] 2011-02-11 12:47:51,273 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 57762: exiting
    [junit] 2011-02-11 12:47:51,273 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 57762: exiting
    [junit] 2011-02-11 12:47:51,273 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 57762: exiting
    [junit] 2011-02-11 12:47:51,274 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 57762: exiting
    [junit] 2011-02-11 12:47:51,274 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 57762: exiting
    [junit] 2011-02-11 12:47:51,274 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 57762: exiting
    [junit] 2011-02-11 12:47:51,274 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 57762
    [junit] 2011-02-11 12:47:51,274 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 37.153 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 73 minutes 12 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:318)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1501)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
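
The failure above inside EPollSelectorImpl means the JVM has hit its per-process
file-descriptor limit (ulimit -n): every Selector, socket, and open file consumes
a descriptor, and a MiniDFSCluster that is not torn down between test cases keeps
its descriptors alive. A minimal, self-contained sketch of the failure mode
(illustrative only, not part of the Hadoop test suite):

    import java.io.IOException;
    import java.nio.channels.Selector;
    import java.util.ArrayList;
    import java.util.List;

    public class SelectorLeakDemo {
      public static void main(String[] args) {
        List<Selector> leaked = new ArrayList<Selector>();
        try {
          while (true) {
            leaked.add(Selector.open()); // never closed -- deliberate leak
          }
        } catch (IOException e) {
          // Typically "java.io.IOException: Too many open files" once the
          // process exhausts its descriptor limit, as in the report above.
          System.err.println("Failed after " + leaked.size() + " selectors: " + e);
        }
      }
    }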


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
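
"Cannot lock storage ... already locked" comes from Storage$StorageDirectory.lock(),
which takes an exclusive java.nio FileLock on the in_use.lock marker file Hadoop
keeps in each storage directory; when an earlier test leaves a MiniDFSCluster
running, the next attempt to format the same name1 directory finds the lock still
held. A hedged sketch of that idiom (class and method names here are illustrative,
not Hadoop's actual code):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    public class StorageDirLock {
      public static FileLock lock(File storageDir) throws IOException {
        File lockFile = new File(storageDir, "in_use.lock");
        RandomAccessFile file = new RandomAccessFile(lockFile, "rws");
        FileLock lock;
        try {
          lock = file.getChannel().tryLock(); // null: held by another process
        } catch (OverlappingFileLockException e) {
          lock = null; // held elsewhere in this same JVM, e.g. a leaked cluster
        }
        if (lock == null) {
          file.close();
          throw new IOException("Cannot lock storage " + storageDir
              + ". The directory is already locked.");
        }
        return lock; // caller holds the lock until shutdown releases it
      }
    }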




Hadoop-Hdfs-trunk - Build # 578 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/578/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 662998 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-02-10 12:47:13,695 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-10 12:47:13,709 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-10 12:47:13,795 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:51219, storageID=DS-1434452765-127.0.1.1-51219-1297342022685, infoPort=48801, ipcPort=37337):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-10 12:47:13,795 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37337
    [junit] 2011-02-10 12:47:13,796 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-10 12:47:13,796 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-10 12:47:13,796 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-10 12:47:13,797 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-10 12:47:13,898 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-10 12:47:13,899 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4 
    [junit] 2011-02-10 12:47:13,898 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-10 12:47:13,900 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37835
    [junit] 2011-02-10 12:47:13,900 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37835
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 37835: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.823 sec
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 37835: exiting
    [junit] 2011-02-10 12:47:13,902 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 37835: exiting
    [junit] 2011-02-10 12:47:13,902 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 37835: exiting
    [junit] 2011-02-10 12:47:13,902 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 37835: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 69 minutes 17 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tro5(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:571)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:50)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:492)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:467)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1593)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1573)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
	at junit.framework.TestCase.runBare(TestCase.java:132)
	at junit.framework.TestResult$1.protect(TestResult.java:110)
	at junit.framework.TestResult.runProtected(TestResult.java:128)
	at junit.framework.TestResult.run(TestResult.java:113)
	at junit.framework.TestCase.run(TestCase.java:124)
	at junit.framework.TestSuite.runTest(TestSuite.java:232)
	at junit.framework.TestSuite.run(TestSuite.java:227)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
	... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:571)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:50)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:492)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:467)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1593)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1573)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:517)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:467)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1593)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1573)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
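
error=24 is EMFILE ("Too many open files"), so here even forking /bin/ls to read
file permissions fails: the descriptor table is full before exec ever runs. On
Linux the current count can be watched from within a test run; a small sketch
(Linux-specific, illustrative only):

    import java.io.File;

    public class FdCount {
      public static void main(String[] args) {
        // /proc/self/fd holds one entry per open descriptor of this process.
        File[] fds = new File("/proc/self/fd").listFiles();
        System.out.println("open fds: " + (fds == null ? "unknown" : fds.length));
      }
    }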


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)




Hadoop-Hdfs-trunk - Build # 577 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/577/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 703816 lines...]
    [junit] 2011-02-09 12:38:56,136 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-09 12:38:56,136 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-09 12:38:56,137 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-09 12:38:56,249 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 46799
    [junit] 2011-02-09 12:38:56,249 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 46799: exiting
    [junit] 2011-02-09 12:38:56,250 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 46799
    [junit] 2011-02-09 12:38:56,250 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-09 12:38:56,250 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:59361, storageID=DS-300555630-127.0.1.1-59361-1297255125241, infoPort=58635, ipcPort=46799):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-09 12:38:56,251 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-09 12:38:56,265 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-09 12:38:56,351 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:59361, storageID=DS-300555630-127.0.1.1-59361-1297255125241, infoPort=58635, ipcPort=46799):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-09 12:38:56,352 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 46799
    [junit] 2011-02-09 12:38:56,352 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-09 12:38:56,352 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-09 12:38:56,352 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-09 12:38:56,353 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-09 12:38:56,455 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-09 12:38:56,455 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-09 12:38:56,456 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 11 3 
    [junit] 2011-02-09 12:38:56,457 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 60968
    [junit] 2011-02-09 12:38:56,458 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 60968: exiting
    [junit] 2011-02-09 12:38:56,458 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 60968: exiting
    [junit] 2011-02-09 12:38:56,458 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 60968: exiting
    [junit] 2011-02-09 12:38:56,458 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 60968: exiting
    [junit] 2011-02-09 12:38:56,459 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 60968: exiting
    [junit] 2011-02-09 12:38:56,458 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-09 12:38:56,459 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 60968: exiting
    [junit] 2011-02-09 12:38:56,459 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 60968: exiting
    [junit] 2011-02-09 12:38:56,459 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 60968: exiting
    [junit] 2011-02-09 12:38:56,459 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 60968: exiting
    [junit] 2011-02-09 12:38:56,458 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60968
    [junit] 2011-02-09 12:38:56,459 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 60968: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 37.138 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 64 minutes 1 second
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.testListCorruptFileBlocksInSafeMode

Error Message:
Namenode is not in safe mode

Stack Trace:
junit.framework.AssertionFailedError: Namenode is not in safe mode
	at org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.__CLR3_0_2mvj3yzpiu(TestListCorruptFileBlocks.java:241)
	at org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.testListCorruptFileBlocksInSafeMode(TestListCorruptFileBlocks.java:132)




Hadoop-Hdfs-trunk - Build # 576 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/576/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 664793 lines...]
    [junit] 2011-02-08 12:51:19,626 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-08 12:51:19,627 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-08 12:51:19,737 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37251
    [junit] 2011-02-08 12:51:19,737 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 37251: exiting
    [junit] 2011-02-08 12:51:19,738 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-08 12:51:19,738 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:55893, storageID=DS-1346001687-127.0.1.1-55893-1297169468694, infoPort=59523, ipcPort=37251):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-08 12:51:19,738 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37251
    [junit] 2011-02-08 12:51:19,738 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-08 12:51:19,738 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-08 12:51:19,750 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-08 12:51:19,850 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:55893, storageID=DS-1346001687-127.0.1.1-55893-1297169468694, infoPort=59523, ipcPort=37251):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-08 12:51:19,851 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37251
    [junit] 2011-02-08 12:51:19,851 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-08 12:51:19,851 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-08 12:51:19,851 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-08 12:51:19,852 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-08 12:51:19,854 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-08 12:51:19,854 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 2 
    [junit] 2011-02-08 12:51:19,854 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-08 12:51:19,855 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 38969
    [junit] 2011-02-08 12:51:19,856 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 38969: exiting
    [junit] 2011-02-08 12:51:19,856 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 38969: exiting
    [junit] 2011-02-08 12:51:19,856 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 38969: exiting
    [junit] 2011-02-08 12:51:19,856 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 38969: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.68 sec
    [junit] 2011-02-08 12:51:19,859 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 38969: exiting
    [junit] 2011-02-08 12:51:19,860 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 38969: exiting
    [junit] 2011-02-08 12:51:19,860 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 38969: exiting
    [junit] 2011-02-08 12:51:19,860 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 38969: exiting
    [junit] 2011-02-08 12:51:19,860 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 38969: exiting
    [junit] 2011-02-08 12:51:19,860 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 38969: exiting
    [junit] 2011-02-08 12:51:19,861 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-08 12:51:19,864 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38969

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 76 minutes 44 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:318)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1501)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer1

Error Message:
127.0.0.1:44994is not an underUtilized node

Stack Trace:
junit.framework.AssertionFailedError: 127.0.0.1:44994is not an underUtilized node
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:1012)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:954)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1497)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testUnevenDistribution(TestBalancer.java:185)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2cs3hxsso5(TestBalancer.java:335)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer1(TestBalancer.java:332)


REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:353)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9so9(TestBalancer.java:344)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.TestLargeBlock.testLargeBlockSize

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.




Hadoop-Hdfs-trunk - Build # 575 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/575/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 694602 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-02-07 13:05:09,027 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-07 13:05:09,114 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-07 13:05:09,128 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:39865, storageID=DS-1815951553-127.0.1.1-39865-1297083898075, infoPort=33846, ipcPort=59191):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-07 13:05:09,128 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 59191
    [junit] 2011-02-07 13:05:09,128 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-07 13:05:09,129 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-07 13:05:09,129 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-07 13:05:09,129 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-07 13:05:09,232 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-07 13:05:09,232 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-07 13:05:09,232 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-02-07 13:05:09,234 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 56703
    [junit] 2011-02-07 13:05:09,234 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 56703: exiting
    [junit] 2011-02-07 13:05:09,234 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56703
    [junit] 2011-02-07 13:05:09,236 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 56703: exiting
    [junit] 2011-02-07 13:05:09,235 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 56703: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.667 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 90 minutes 36 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestLargeBlock.testLargeBlockSize

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.
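
The "Timeout occurred" failure is injected by the build's test harness when it kills a test that overruns its time limit, which is why the reported elapsed time does not reflect the time until the timeout. The same watchdog pattern can be sketched with only the standard library (class name, test body, and the 15-minute limit are illustrative, not taken from the build):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class Watchdog {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            Future<?> body = pool.submit(new Runnable() {
                public void run() {
                    // the real work would go here, e.g. writing one very large block
                }
            });
            try {
                body.get(15, TimeUnit.MINUTES);   // wait for the body, but only this long
            } catch (TimeoutException e) {
                body.cancel(true);                // interrupt the stuck body
                throw new AssertionError("Timeout occurred");
            } finally {
                pool.shutdownNow();
            }
        }
    }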


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of dce7cc80c6da033edb1cb296c49a316e but expecting 0741d4a446340e1d20403fc4565760d7

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of dce7cc80c6da033edb1cb296c49a316e but expecting 0741d4a446340e1d20403fc4565760d7
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:670)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:710)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:603)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:480)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:441)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4ubl(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
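
The checksum mismatch above is the integrity check in FSImage.loadFSImage: the fsimage is re-digested as it is read, and the result is compared with the MD5 recorded when the image was written. A minimal, self-contained sketch of that kind of verification (class name, buffer size, and argument handling are illustrative, not Hadoop's code):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;

    public class Md5Check {
        public static void main(String[] args) throws Exception {
            String path = args[0];       // e.g. .../dfs/secondary/current/fsimage
            String expected = args[1];   // hex digest recorded at write time
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            DigestInputStream in = new DigestInputStream(new FileInputStream(path), md5);
            byte[] buf = new byte[8192];
            while (in.read(buf) != -1) {
                // the wrapping stream feeds every byte read into the digest
            }
            in.close();
            StringBuilder hex = new StringBuilder();
            for (byte b : md5.digest()) {
                hex.append(String.format("%02x", b));
            }
            if (!hex.toString().equals(expected)) {
                throw new IOException("Image file " + path + " is corrupt with MD5 checksum of "
                    + hex + " but expecting " + expected);
            }
        }
    }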




Hadoop-Hdfs-trunk - Build # 574 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/574/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 684897 lines...]
    [junit] 2011-02-06 12:35:47,390 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-06 12:35:47,390 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-06 12:35:47,492 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 53480
    [junit] 2011-02-06 12:35:47,492 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 53480: exiting
    [junit] 2011-02-06 12:35:47,493 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53480
    [junit] 2011-02-06 12:35:47,493 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-06 12:35:47,493 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:36039, storageID=DS-1455553998-127.0.1.1-36039-1296995736550, infoPort=42382, ipcPort=53480):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-06 12:35:47,493 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-06 12:35:47,495 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-06 12:35:47,592 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-06 12:35:47,596 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:36039, storageID=DS-1455553998-127.0.1.1-36039-1296995736550, infoPort=42382, ipcPort=53480):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-06 12:35:47,596 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 53480
    [junit] 2011-02-06 12:35:47,596 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-06 12:35:47,597 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-06 12:35:47,597 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-06 12:35:47,597 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-06 12:35:47,700 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-06 12:35:47,700 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-06 12:35:47,700 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3
    [junit] 2011-02-06 12:35:47,702 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 36868
    [junit] 2011-02-06 12:35:47,702 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 36868: exiting
    [junit] 2011-02-06 12:35:47,702 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 36868
    [junit] 2011-02-06 12:35:47,703 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 36868: exiting
    [junit] 2011-02-06 12:35:47,703 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 36868: exiting
    [junit] 2011-02-06 12:35:47,703 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 36868: exiting
    [junit] 2011-02-06 12:35:47,703 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 36868: exiting
    [junit] 2011-02-06 12:35:47,702 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 36868: exiting
    [junit] 2011-02-06 12:35:47,704 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 36868: exiting
    [junit] 2011-02-06 12:35:47,702 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-06 12:35:47,704 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 36868: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.334 sec
    [junit] 2011-02-06 12:35:47,703 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 36868: exiting
    [junit] 2011-02-06 12:35:47,702 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 36868: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 61 minutes 22 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182r7m(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
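
The blockReport_09 assertion samples the pending-replication count at a single instant, so it races the NameNode's processing of the block report; on a loaded build slave the count can legitimately still be 1 when the check fires. The usual fix for this class of flake is a bounded poll instead of a one-shot assertion; a generic sketch (the helper and its parameters are illustrative, not part of TestBlockReport):

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeoutException;

    public final class WaitUtil {
        private WaitUtil() {}

        /** Re-checks the condition every intervalMs until it holds or timeoutMs elapses. */
        public static void waitFor(Callable<Boolean> check, long intervalMs, long timeoutMs)
                throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (System.currentTimeMillis() < deadline) {
                if (Boolean.TRUE.equals(check.call())) {
                    return;
                }
                Thread.sleep(intervalMs);
            }
            throw new TimeoutException("condition not met within " + timeoutMs + " ms");
        }
    }

With this, the test fails only after the deadline rather than on the first unlucky scheduling.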


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 32306d67a4acbc6ccb3bb49e790cfd9c but expecting e2889bb9ee0079b53e1d6ad739ad3545

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 32306d67a4acbc6ccb3bb49e790cfd9c but expecting e2889bb9ee0079b53e1d6ad739ad3545
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:670)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:710)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:603)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:480)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:441)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4ubl(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 573 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/573/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 696949 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-02-05 12:44:19,608 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-05 12:44:19,698 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-05 12:44:19,708 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:60658, storageID=DS-1815650214-127.0.1.1-60658-1296909848618, infoPort=37402, ipcPort=54671):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-05 12:44:19,709 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 54671
    [junit] 2011-02-05 12:44:19,709 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-05 12:44:19,709 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-05 12:44:19,709 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-05 12:44:19,710 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-05 12:44:19,812 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-05 12:44:19,812 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-05 12:44:19,812 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3
    [junit] 2011-02-05 12:44:19,815 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 45388
    [junit] 2011-02-05 12:44:19,815 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 45388: exiting
    [junit] 2011-02-05 12:44:19,816 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 45388
    [junit] 2011-02-05 12:44:19,816 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 45388: exiting
    [junit] 2011-02-05 12:44:19,816 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-05 12:44:19,816 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 45388: exiting
    [junit] 2011-02-05 12:44:19,816 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 45388: exiting
    [junit] 2011-02-05 12:44:19,816 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 45388: exiting
    [junit] 2011-02-05 12:44:19,817 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 45388: exiting
    [junit] 2011-02-05 12:44:19,817 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 45388: exiting
    [junit] 2011-02-05 12:44:19,817 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 45388: exiting
    [junit] 2011-02-05 12:44:19,817 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 45388: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.904 sec
    [junit] 2011-02-05 12:44:19,820 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 45388: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 69 minutes 51 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jr71(TestBlockReport.java:408)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 9bd71805a213b5bbe19c7b70e192966e but expecting f835a1af7ec34ec3e0955f537280da4b

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 9bd71805a213b5bbe19c7b70e192966e but expecting f835a1af7ec34ec3e0955f537280da4b
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:670)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:710)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:603)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:480)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:441)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4ubl(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 572 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/572/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 682735 lines...]
    [junit] 2011-02-04 12:45:20,717 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-04 12:45:20,717 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-04 12:45:20,819 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 60426
    [junit] 2011-02-04 12:45:20,819 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 60426: exiting
    [junit] 2011-02-04 12:45:20,820 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60426
    [junit] 2011-02-04 12:45:20,820 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-04 12:45:20,820 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-04 12:45:20,821 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:43470, storageID=DS-765416819-127.0.1.1-43470-1296823509836, infoPort=44299, ipcPort=60426):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-04 12:45:20,822 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-04 12:45:20,870 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-04 12:45:20,923 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:43470, storageID=DS-765416819-127.0.1.1-43470-1296823509836, infoPort=44299, ipcPort=60426):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-04 12:45:20,923 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 60426
    [junit] 2011-02-04 12:45:20,923 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-04 12:45:20,923 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-04 12:45:20,924 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-04 12:45:20,924 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-04 12:45:21,034 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-04 12:45:21,034 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 4
    [junit] 2011-02-04 12:45:21,034 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-04 12:45:21,036 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 59863
    [junit] 2011-02-04 12:45:21,036 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 59863: exiting
    [junit] 2011-02-04 12:45:21,036 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 59863: exiting
    [junit] 2011-02-04 12:45:21,037 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 59863: exiting
    [junit] 2011-02-04 12:45:21,036 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 59863: exiting
    [junit] 2011-02-04 12:45:21,037 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-04 12:45:21,037 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59863
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.542 sec
    [junit] 2011-02-04 12:45:21,043 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 59863: exiting
    [junit] 2011-02-04 12:45:21,043 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 59863: exiting
    [junit] 2011-02-04 12:45:21,043 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 59863: exiting
    [junit] 2011-02-04 12:45:21,044 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 59863: exiting
    [junit] 2011-02-04 12:45:21,044 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 59863: exiting
    [junit] 2011-02-04 12:45:21,044 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 59863: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 70 minutes 58 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 716f9025f1f4fbde54beebac4d6297de but expecting 868519daec2b4d7e8889744df8aa3b1d

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 716f9025f1f4fbde54beebac4d6297de but expecting 868519daec2b4d7e8889744df8aa3b1d
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:670)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:710)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:603)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:480)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:441)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4ual(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 571 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/571/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 683887 lines...]
    [junit] 2011-02-03 12:46:17,164 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-03 12:46:17,165 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-03 12:46:17,270 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 56581
    [junit] 2011-02-03 12:46:17,270 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 56581: exiting
    [junit] 2011-02-03 12:46:17,270 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56581
    [junit] 2011-02-03 12:46:17,270 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-03 12:46:17,271 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-03 12:46:17,271 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:41945, storageID=DS-1755074574-127.0.1.1-41945-1296737166409, infoPort=45492, ipcPort=56581):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-03 12:46:17,273 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-03 12:46:17,373 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-03 12:46:17,373 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:41945, storageID=DS-1755074574-127.0.1.1-41945-1296737166409, infoPort=45492, ipcPort=56581):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-03 12:46:17,374 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 56581
    [junit] 2011-02-03 12:46:17,374 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-03 12:46:17,374 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-03 12:46:17,374 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-03 12:46:17,375 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-03 12:46:17,379 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-03 12:46:17,379 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 4
    [junit] 2011-02-03 12:46:17,379 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-03 12:46:17,381 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 42659
    [junit] 2011-02-03 12:46:17,381 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 42659: exiting
    [junit] 2011-02-03 12:46:17,381 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 42659: exiting
    [junit] 2011-02-03 12:46:17,382 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-03 12:46:17,381 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 42659
    [junit] 2011-02-03 12:46:17,383 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 42659: exiting
    [junit] 2011-02-03 12:46:17,383 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 42659: exiting
    [junit] 2011-02-03 12:46:17,384 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 42659: exiting
    [junit] 2011-02-03 12:46:17,385 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 42659: exiting
    [junit] 2011-02-03 12:46:17,385 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 42659: exiting
    [junit] 2011-02-03 12:46:17,385 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 42659: exiting
    [junit] 2011-02-03 12:46:17,385 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 42659: exiting
    [junit] 2011-02-03 12:46:17,385 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 42659: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.717 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 71 minutes 55 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:333)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:461)
	at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:442)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken$1.run(TestClientProtocolWithDelegationToken.java:110)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1142)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.__CLR3_0_23sqt3nomq(TestClientProtocolWithDelegationToken.java:105)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc(TestClientProtocolWithDelegationToken.java:77)


FAILED:  org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
	at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:345)
	at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.__CLR3_0_2i25d82ok8(TestBlockToken.java:212)
	at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc(TestBlockToken.java:185)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of fc82ec67753f9c5d68d067790592212f but expecting 098e7b79892d15b17bbeedcbc943a098

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of fc82ec67753f9c5d68d067790592212f but expecting 098e7b79892d15b17bbeedcbc943a098
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:670)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:710)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:603)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:480)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:441)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4uab(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 570 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/570/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 647990 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-02-02 12:44:05,074 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-02 12:44:05,152 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-02 12:44:05,174 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:44328, storageID=DS-961653417-127.0.1.1-44328-1296650634128, infoPort=44680, ipcPort=36685):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-02 12:44:05,175 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 36685
    [junit] 2011-02-02 12:44:05,175 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-02 12:44:05,175 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-02 12:44:05,175 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-02 12:44:05,175 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-02 12:44:05,278 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-02 12:44:05,278 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-02 12:44:05,278 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 3 3
    [junit] 2011-02-02 12:44:05,279 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 55594
    [junit] 2011-02-02 12:44:05,280 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 55594: exiting
    [junit] 2011-02-02 12:44:05,280 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 55594: exiting
    [junit] 2011-02-02 12:44:05,280 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 55594: exiting
    [junit] 2011-02-02 12:44:05,280 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 55594: exiting
    [junit] 2011-02-02 12:44:05,280 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 55594: exiting
    [junit] 2011-02-02 12:44:05,280 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 55594: exiting
    [junit] 2011-02-02 12:44:05,281 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 55594: exiting
    [junit] 2011-02-02 12:44:05,281 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 55594: exiting
    [junit] 2011-02-02 12:44:05,281 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 55594: exiting
    [junit] 2011-02-02 12:44:05,280 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 55594: exiting
    [junit] 2011-02-02 12:44:05,281 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 55594
    [junit] 2011-02-02 12:44:05,281 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 37.192 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 69 minutes 48 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
FAILED:  org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:333)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:461)
	at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:442)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken$1.run(TestClientProtocolWithDelegationToken.java:110)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1142)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.__CLR3_0_23sqt3nomq(TestClientProtocolWithDelegationToken.java:105)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc(TestClientProtocolWithDelegationToken.java:77)


FAILED:  org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
	at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:345)
	at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.__CLR3_0_2i25d82ok8(TestBlockToken.java:212)
	at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc(TestBlockToken.java:185)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:563)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:582)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:563)
	at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:582)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
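
Both "Cannot lock storage" failures above trip over the exclusive lock that Storage$StorageDirectory.lock takes on the name directory, which suggests a cluster from an earlier test still held name1 when setUp tried to format it again. The mechanism is a plain java.nio file lock; a minimal sketch (the in_use.lock file name follows Hadoop's convention, everything else is illustrative):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    public class DirLock {
        /** Takes an exclusive lock on dir, failing if anything else already holds it. */
        public static FileLock lockDir(File dir) throws IOException {
            RandomAccessFile raf = new RandomAccessFile(new File(dir, "in_use.lock"), "rws");
            FileLock lock;
            try {
                lock = raf.getChannel().tryLock();   // null if another process holds it
            } catch (OverlappingFileLockException e) {
                lock = null;                         // this JVM already holds it
            }
            if (lock == null) {
                raf.close();
                throw new IOException("Cannot lock storage " + dir
                    + ". The directory is already locked.");
            }
            return lock;   // caller must release() the lock and close the file
        }
    }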


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1510)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
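
"Too many open files" means the JVM hit the process file-descriptor limit; since it surfaces here while setUp is merely starting a MiniDFSCluster, it more likely points at descriptors leaked by earlier tests in the same JVM than at this test itself. A small probe a test can log before starting a cluster (relies on the Sun-specific com.sun.management extension, so it is JVM-dependent):

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;

    public class FdCount {
        public static void main(String[] args) {
            OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
            if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
                com.sun.management.UnixOperatingSystemMXBean unix =
                    (com.sun.management.UnixOperatingSystemMXBean) os;
                System.out.println("open fds = " + unix.getOpenFileDescriptorCount()
                    + " of max " + unix.getMaxFileDescriptorCount());
            } else {
                System.out.println("fd counts not available on this JVM");
            }
        }
    }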


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 25c47fa48ee71ac48df1b7582aabca09 but expecting 6372643c69dcac72706a96735981c98a

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 25c47fa48ee71ac48df1b7582aabca09 but expecting 6372643c69dcac72706a96735981c98a
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:670)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:710)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:603)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:480)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:441)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4uab(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 569 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/569/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 672926 lines...]
    [junit] 2011-02-01 12:35:29,209 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-01 12:35:29,209 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-01 12:35:29,312 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 48677
    [junit] 2011-02-01 12:35:29,312 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 48677: exiting
    [junit] 2011-02-01 12:35:29,312 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 48677
    [junit] 2011-02-01 12:35:29,313 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-01 12:35:29,312 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-01 12:35:29,313 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:59600, storageID=DS-1043273588-127.0.1.1-59600-1296563718288, infoPort=40832, ipcPort=48677):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-01 12:35:29,315 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-01 12:35:29,331 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:59600, storageID=DS-1043273588-127.0.1.1-59600-1296563718288, infoPort=40832, ipcPort=48677):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-01 12:35:29,416 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 48677
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-01 12:35:29,417 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-01 12:35:29,529 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-01 12:35:29,528 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-01 12:35:29,529 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(595)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 4
    [junit] 2011-02-01 12:35:29,530 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 42159
    [junit] 2011-02-01 12:35:29,531 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 42159: exiting
    [junit] 2011-02-01 12:35:29,531 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 42159: exiting
    [junit] 2011-02-01 12:35:29,531 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 42159
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.765 sec
    [junit] 2011-02-01 12:35:29,535 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 42159: exiting
    [junit] 2011-02-01 12:35:29,535 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-01 12:35:29,535 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 42159: exiting
    [junit] 2011-02-01 12:35:29,536 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 42159: exiting
    [junit] 2011-02-01 12:35:29,536 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 42159: exiting
    [junit] 2011-02-01 12:35:29,538 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 42159: exiting
    [junit] 2011-02-01 12:35:29,538 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 42159: exiting
    [junit] 2011-02-01 12:35:29,539 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 42159: exiting
    [junit] 2011-02-01 12:35:29,539 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 42159: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 61 minutes 7 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:333)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:461)
	at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:442)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken$1.run(TestClientProtocolWithDelegationToken.java:110)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1142)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.__CLR3_0_23sqt3nola(TestClientProtocolWithDelegationToken.java:105)
	at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc(TestClientProtocolWithDelegationToken.java:77)


FAILED:  org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
	at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
	at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:345)
	at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.__CLR3_0_2i25d82ois(TestBlockToken.java:212)
	at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc(TestBlockToken.java:185)
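
Both token tests above die with the same NullPointerException inside WritableRpcEngine.getProxy() itself, before any proxy is handed back to the test, which points at the RPC plumbing in the hadoop-common jar on the classpath rather than at the tests: the getProtocolProxy() chain visible in the traces was a then-new addition to RPC, so a stale or mismatched common snapshot is the likely culprit. For reference, the call shape both tests exercise looks roughly like the sketch below (a hedged reconstruction; addr and conf are placeholders, and the real tests pass additional arguments such as the token-bearing UGI):

    import java.net.InetSocketAddress;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.security.UserGroupInformation;

    class TokenRpcSketch {
      // Obtain an RPC proxy while running as the token's user; the NPE in the
      // report fires inside RPC.getProxy(), i.e. before run() can return.
      static ClientProtocol getProxyAs(UserGroupInformation ugi,
          final InetSocketAddress addr, final Configuration conf) throws Exception {
        return ugi.doAs(new PrivilegedExceptionAction<ClientProtocol>() {
          public ClientProtocol run() throws Exception {
            return RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID,
                addr, conf);
          }
        });
      }
    }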


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
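
The two "directory is already locked" failures above are the classic MiniDFSCluster leak: Storage.lock() takes a java.nio FileLock on the storage directory's lock file (in_use.lock), and an overlapping lock attempt from within the same JVM fails, so a cluster left running by an earlier test (for instance one that died with "Too many open files") poisons every later setUp() that formats the same name directory. The usual guard is an unconditional shutdown in tearDown, sketched below in the JUnit 3 style that TestFileConcurrentReader's setUp/tearDown methods use (cluster is a hypothetical field on the test class):

    // Inside the test class: always release the MiniDFSCluster, even after a
    // failed test, so the next setUp() can re-lock .../dfs/name1.
    protected void tearDown() throws Exception {
      if (cluster != null) {
        cluster.shutdown();
        cluster = null;
      }
      super.tearDown();
    }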


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1510)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5trj9(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
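
This failure is plain file-descriptor exhaustion: the trace shows Selector.open() failing in initPipe(), and on Linux every selector costs the process a wakeup pipe plus an epoll descriptor. Each MiniDFSCluster that is not shut down keeps its IPC selectors, sockets, and data-directory handles open, so a few leaky tests in the same forked JVM are enough to hit the per-process ulimit. A minimal demonstration of the mechanism (illustrative only, not Hadoop code; run under a deliberately low `ulimit -n`):

    import java.nio.channels.Selector;

    class FdLeak {
      public static void main(String[] args) throws Exception {
        while (true) {
          // Never closed: each iteration leaks the selector's descriptors until
          // Selector.open() throws "Too many open files", as in the trace above.
          Selector leaked = Selector.open();
        }
      }
    }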


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 4b0319f5f17997e862e5ec978d475775 but expecting a365969efc0d144bbd316c6e12a46ae7

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 4b0319f5f17997e862e5ec978d475775 but expecting a365969efc0d144bbd316c6e12a46ae7
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8u(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
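
Here the SecondaryNameNode rejects the checkpoint because the fsimage it merged does not hash to the MD5 the NameNode advertised, which usually means the image was read while still being written or was truncated in transfer; this is a recurring source of TestStorageRestore flakiness on this machine. The check FSImage.loadFSImage performs amounts to digesting the image bytes as they are read and comparing against the expected checksum, roughly as in this sketch (method and variable names are illustrative, not Hadoop's):

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;

    class ImageDigestSketch {
      // Digest a file while streaming it, as an image loader would; the caller
      // compares the result with the checksum recorded at checkpoint time.
      static byte[] md5Of(String path) throws Exception {
        MessageDigest md = MessageDigest.getInstance("MD5");
        InputStream in = new DigestInputStream(new FileInputStream(path), md);
        try {
          byte[] buf = new byte[8192];
          while (in.read(buf) != -1) {
            // reading is enough; DigestInputStream updates md as a side effect
          }
        } finally {
          in.close();
        }
        return md.digest();
      }
    }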




Hadoop-Hdfs-trunk - Build # 568 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/568/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1090 lines...]
    [javac]   public ProtocolSignature getProtocolSignature(String protocol,
    [javac]          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:89: cannot find symbol
    [javac] symbol  : class ProtocolSignature
    [javac] location: package org.apache.hadoop.ipc
    [javac] import org.apache.hadoop.ipc.ProtocolSignature;
    [javac]                             ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:169: cannot find symbol
    [javac] symbol  : class ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.namenode.NameNode
    [javac]   public ProtocolSignature getProtocolSignature(String protocol,
    [javac]          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:1768: cannot find symbol
    [javac] symbol  : variable ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.datanode.DataNode
    [javac]     return ProtocolSignature.getProtocolSigature(
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:1765: method does not override or implement a method from a supertype
    [javac]   @Override
    [javac]   ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:171: cannot find symbol
    [javac] symbol  : variable ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.namenode.NameNode
    [javac]     return ProtocolSignature.getProtocolSigature(
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:168: method does not override or implement a method from a supertype
    [javac]   @Override
    [javac]   ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] 8 errors

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:344: Compile failed; see the compiler error output for details.

Total time: 7 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/*.jar': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
mv: cannot stat `build/docs/api': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.
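
All eight compile errors in this report (and in builds 567 and 566 below, which fail identically) trace to one cause: the HDFS sources reference org.apache.hadoop.ipc.ProtocolSignature, but the hadoop-common jar this build resolved predates that API, so the class cannot be found and every dependent @Override fails with it. The repeated, byte-identical failure fits a stale upstream common snapshot rather than a bad HDFS commit. Reconstructed from the compiler output, the code that will not compile has roughly this shape (a sketch, not the exact source: the parameter list is completed from the VersionedProtocol signature of that era, which is an assumption, and the console itself shows the delegate spelled getProtocolSigature, so the sketch keeps that spelling):

    // Reconstructed fragments of NameNode.java per the errors above; both the
    // import (line 89) and the override (lines 168-171) fail until a
    // hadoop-common jar containing ProtocolSignature is on the classpath.
    import org.apache.hadoop.ipc.ProtocolSignature;

    @Override
    public ProtocolSignature getProtocolSignature(String protocol,
        long clientVersion, int clientMethodsHash) throws IOException {
      return ProtocolSignature.getProtocolSigature(
          this, protocol, clientVersion, clientMethodsHash);
    }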

Hadoop-Hdfs-trunk - Build # 567 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/567/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1090 lines...]
    [javac]   public ProtocolSignature getProtocolSignature(String protocol,
    [javac]          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:89: cannot find symbol
    [javac] symbol  : class ProtocolSignature
    [javac] location: package org.apache.hadoop.ipc
    [javac] import org.apache.hadoop.ipc.ProtocolSignature;
    [javac]                             ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:169: cannot find symbol
    [javac] symbol  : class ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.namenode.NameNode
    [javac]   public ProtocolSignature getProtocolSignature(String protocol,
    [javac]          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:1768: cannot find symbol
    [javac] symbol  : variable ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.datanode.DataNode
    [javac]     return ProtocolSignature.getProtocolSigature(
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:1765: method does not override or implement a method from a supertype
    [javac]   @Override
    [javac]   ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:171: cannot find symbol
    [javac] symbol  : variable ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.namenode.NameNode
    [javac]     return ProtocolSignature.getProtocolSigature(
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:168: method does not override or implement a method from a supertype
    [javac]   @Override
    [javac]   ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] 8 errors

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:344: Compile failed; see the compiler error output for details.

Total time: 7 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/*.jar': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
mv: cannot stat `build/docs/api': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 566 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/566/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1089 lines...]
    [javac]   public ProtocolSignature getProtocolSignature(String protocol,
    [javac]          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:89: cannot find symbol
    [javac] symbol  : class ProtocolSignature
    [javac] location: package org.apache.hadoop.ipc
    [javac] import org.apache.hadoop.ipc.ProtocolSignature;
    [javac]                             ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:169: cannot find symbol
    [javac] symbol  : class ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.namenode.NameNode
    [javac]   public ProtocolSignature getProtocolSignature(String protocol,
    [javac]          ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:1768: cannot find symbol
    [javac] symbol  : variable ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.datanode.DataNode
    [javac]     return ProtocolSignature.getProtocolSigature(
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java:1765: method does not override or implement a method from a supertype
    [javac]   @Override
    [javac]   ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:171: cannot find symbol
    [javac] symbol  : variable ProtocolSignature
    [javac] location: class org.apache.hadoop.hdfs.server.namenode.NameNode
    [javac]     return ProtocolSignature.getProtocolSigature(
    [javac]            ^
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java:168: method does not override or implement a method from a supertype
    [javac]   @Override
    [javac]   ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] 8 errors

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:344: Compile failed; see the compiler error output for details.

Total time: 7 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/*.jar': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
mv: cannot stat `build/docs/api': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 565 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/565/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 344613 lines...]
    [junit] 2011-01-28 16:47:24,295 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(935)) - BLOCK* ask 127.0.0.1:56404 to replicate blk_4801764326403794770_1017 to datanode(s) 127.0.0.1:50485
    [junit] 2011-01-28 16:47:24,295 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(935)) - BLOCK* ask 127.0.0.1:56404 to replicate blk_6965342782379580550_1019 to datanode(s) 127.0.0.1:50485
    [junit] 2011-01-28 16:47:25,321 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910) Starting thread to transfer block blk_4801764326403794770_1017 to 127.0.0.1:50485 
    [junit] 2011-01-28 16:47:25,321 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910) Starting thread to transfer block blk_6965342782379580550_1019 to 127.0.0.1:50485 
    [junit] 2011-01-28 16:47:25,322 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910):Failed to transfer blk_4801764326403794770_1017 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:25,322 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910):Failed to transfer blk_6965342782379580550_1019 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:25,926 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393) Starting thread to transfer block blk_4164803938547019182_1016 to 127.0.0.1:50485 
    [junit] 2011-01-28 16:47:25,926 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393) Starting thread to transfer block blk_4278625428280162445_1020 to 127.0.0.1:50485 
    [junit] 2011-01-28 16:47:25,926 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393):Failed to transfer blk_4164803938547019182_1016 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:25,927 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393):Failed to transfer blk_4278625428280162445_1020 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:27,296 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(935)) - BLOCK* ask 127.0.0.1:36514 to replicate blk_9000474117190497559_1015 to datanode(s) 127.0.0.1:50485
    [junit] 2011-01-28 16:47:28,927 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393) Starting thread to transfer block blk_9000474117190497559_1015 to 127.0.0.1:50485 
    [junit] 2011-01-28 16:47:28,927 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393):Failed to transfer blk_9000474117190497559_1015 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] /homes/hudson/tools/java/jdk1.6.0_11-32/jre/lib/rt.jar: error reading zip file
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml was length 0

FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1510)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 2af0a037575b78fd3e690135ffb0d8c7 but expecting 23e55a458d1b3e2a8972fe15879ea798

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 2af0a037575b78fd3e690135ffb0d8c7 but expecting 23e55a458d1b3e2a8972fe15879ea798
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8h(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 564 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/564/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 667230 lines...]
    [junit] 2011-01-27 12:34:15,485 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-27 12:34:15,485 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-01-27 12:34:15,587 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 56304
    [junit] 2011-01-27 12:34:15,587 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 56304: exiting
    [junit] 2011-01-27 12:34:15,587 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56304
    [junit] 2011-01-27 12:34:15,588 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-27 12:34:15,588 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:43301, storageID=DS-1801895921-127.0.1.1-43301-1296131644491, infoPort=60765, ipcPort=56304):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-27 12:34:15,588 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-27 12:34:15,590 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-27 12:34:15,602 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-27 12:34:15,691 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:43301, storageID=DS-1801895921-127.0.1.1-43301-1296131644491, infoPort=60765, ipcPort=56304):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-27 12:34:15,691 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 56304
    [junit] 2011-01-27 12:34:15,691 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-27 12:34:15,691 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-27 12:34:15,691 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-27 12:34:15,692 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-27 12:34:15,809 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2845)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-27 12:34:15,809 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(595)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2011-01-27 12:34:15,809 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-27 12:34:15,811 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 36263
    [junit] 2011-01-27 12:34:15,811 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 36263: exiting
    [junit] 2011-01-27 12:34:15,811 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 36263: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.814 sec
    [junit] 2011-01-27 12:34:15,811 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 36263: exiting
    [junit] 2011-01-27 12:34:15,811 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 36263: exiting
    [junit] 2011-01-27 12:34:15,811 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 36263: exiting
    [junit] 2011-01-27 12:34:15,815 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-27 12:34:15,815 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 36263
    [junit] 2011-01-27 12:34:15,812 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 36263: exiting
    [junit] 2011-01-27 12:34:15,812 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 36263: exiting
    [junit] 2011-01-27 12:34:15,812 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 36263: exiting
    [junit] 2011-01-27 12:34:15,812 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 36263: exiting
    [junit] 2011-01-27 12:34:15,812 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 36263: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 60 minutes 7 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:318)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1501)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 8d9c47028f2505ffd04f854bb750f447 but expecting 48630ac252ec56516825c4b96c55a464

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 8d9c47028f2505ffd04f854bb750f447 but expecting 48630ac252ec56516825c4b96c55a464
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8h(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 563 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/563/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 647053 lines...]
    [junit] 2011-01-26 12:43:46,849 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-26 12:43:46,849 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-01-26 12:43:46,951 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 47899
    [junit] 2011-01-26 12:43:46,951 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 47899: exiting
    [junit] 2011-01-26 12:43:46,951 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 47899
    [junit] 2011-01-26 12:43:46,952 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-26 12:43:46,952 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-26 12:43:46,952 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:40155, storageID=DS-341133611-127.0.1.1-40155-1296045815829, infoPort=49313, ipcPort=47899):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-26 12:43:46,954 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-26 12:43:47,013 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-26 12:43:47,055 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:40155, storageID=DS-341133611-127.0.1.1-40155-1296045815829, infoPort=49313, ipcPort=47899):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-26 12:43:47,055 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 47899
    [junit] 2011-01-26 12:43:47,055 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-26 12:43:47,055 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-26 12:43:47,056 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-26 12:43:47,056 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-26 12:43:47,158 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-26 12:43:47,158 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(595)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 3 
    [junit] 2011-01-26 12:43:47,158 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2845)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-26 12:43:47,159 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 40071
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 40071: exiting
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 40071: exiting
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 40071: exiting
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 40071: exiting
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 40071: exiting
    [junit] 2011-01-26 12:43:47,161 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 40071: exiting
    [junit] 2011-01-26 12:43:47,161 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 40071: exiting
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 40071
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 40071: exiting
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 40071: exiting
    [junit] 2011-01-26 12:43:47,160 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 40071: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.796 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 69 minutes 45 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
	at junit.framework.TestCase.runBare(TestCase.java:132)
	at junit.framework.TestResult$1.protect(TestResult.java:110)
	at junit.framework.TestResult.runProtected(TestResult.java:128)
	at junit.framework.TestResult.run(TestResult.java:113)
	at junit.framework.TestCase.run(TestCase.java:124)
	at junit.framework.TestSuite.runTest(TestSuite.java:232)
	at junit.framework.TestSuite.run(TestSuite.java:227)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
	... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
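
This failure, like the rest of this report, traces back to one condition: the build slave's JVM has exhausted its file-descriptor limit, so even forking "/bin/ls" to read permissions fails with error=24 (EMFILE). A minimal, hypothetical diagnostic (not part of the Hadoop test suite; class name and paths are illustrative) that shows both symptoms on a Linux host, where /proc/self/fd lists the process's open descriptors:

    import java.io.File;
    import java.io.IOException;

    // Hypothetical diagnostic: count this JVM's open descriptors, then make
    // the same kind of fork/exec call that Shell.execCommand makes. Once the
    // ulimit is exhausted, ProcessBuilder.start() throws IOException error=24.
    public class FdExhaustionDemo {
        public static void main(String[] args) throws IOException {
            // On Linux, /proc/self/fd holds one entry per open descriptor.
            File[] fds = new File("/proc/self/fd").listFiles();
            System.out.println("open descriptors: "
                + (fds == null ? "unknown" : fds.length));

            // Forking "/bin/ls" needs descriptors for the child's pipes, so
            // it is often the first casualty when the limit is reached.
            Process p = new ProcessBuilder("/bin/ls", "-ld", "/tmp").start();
            p.getInputStream().close();
            p.getOutputStream().close();
            p.getErrorStream().close();
        }
    }

Raising the slave's limit (e.g. ulimit -n) would only mask the problem; the leak itself most likely comes from MiniDFSCluster instances that are never shut down after an earlier failure.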


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
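
The "already locked" failure is a secondary casualty: Storage$StorageDirectory.lock takes an exclusive, non-blocking lock on a file inside the storage directory, and a MiniDFSCluster that was never torn down (for example after the descriptor exhaustion above) still holds it. A sketch of that locking idiom, with illustrative names rather than the actual Hadoop source:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    // Illustrative sketch of directory locking via a lock file; the name
    // "in_use.lock" follows the convention HDFS storage directories use.
    public class StorageLockDemo {
        static FileLock tryLockStorage(File storageDir) throws IOException {
            File lockFile = new File(storageDir, "in_use.lock");
            RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
            FileLock lock;
            try {
                // null when another process holds the lock; an overlap
                // exception when this same JVM already holds it, which is
                // the case when an earlier test leaked its cluster.
                lock = raf.getChannel().tryLock();
            } catch (OverlappingFileLockException e) {
                lock = null;
            }
            if (lock == null) {
                raf.close();
                throw new IOException("Cannot lock storage " + storageDir
                    + ". The directory is already locked.");
            }
            return lock; // held until released or the JVM exits
        }
    }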


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5trj2(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 87b07083c117822ee26f779fa267c6ab but expecting 7c1fb2aa29ea4b91ea5edb49d9e01be3

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 87b07083c117822ee26f779fa267c6ab but expecting 7c1fb2aa29ea4b91ea5edb49d9e01be3
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8h(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
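
The TestStorageRestore failure is a different class of problem: when the secondary NameNode merges a checkpoint it verifies the fsimage file against the MD5 digest it expects, and here the two digests disagree. Illustratively (this is the shape of the check, not the FSImage code itself; class and method names are made up):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Illustrative sketch of an MD5 integrity check over an image file,
    // producing the same style of error message seen above.
    public class ImageChecksum {
        static String md5Hex(String path)
                throws IOException, NoSuchAlgorithmException {
            MessageDigest md = MessageDigest.getInstance("MD5");
            try (InputStream in =
                     new DigestInputStream(new FileInputStream(path), md)) {
                byte[] buf = new byte[8192];
                while (in.read(buf) != -1) { } // digest updates as we read
            }
            StringBuilder sb = new StringBuilder();
            for (byte b : md.digest()) {
                sb.append(String.format("%02x", b & 0xff));
            }
            return sb.toString();
        }

        static void verify(String path, String expected)
                throws IOException, NoSuchAlgorithmException {
            String actual = md5Hex(path);
            if (!actual.equals(expected)) {
                throw new IOException("Image file " + path
                    + " is corrupt with MD5 checksum of " + actual
                    + " but expecting " + expected);
            }
        }
    }

Whether the image on disk is truly corrupt, or the expected digest was taken over a different (for example, still-growing) file, cannot be told from this log alone.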




Hadoop-Hdfs-trunk - Build # 562 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/562/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 573269 lines...]
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_961040353182632342_1026 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir33/blk_961040353182632342
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_446157499050749557_1002 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir25/blk_446157499050749557
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_964842134414266232_1024 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir32/blk_964842134414266232
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_738509093523947015_1029 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir46/blk_738509093523947015
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_1127292827865635932_1063 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir8/blk_1127292827865635932
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_1074191071901886407_1034 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir40/blk_1074191071901886407
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_1902601199753909749_1090 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir27/subdir16/blk_1902601199753909749
    [junit] 2011-01-25 12:15:42,802 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_1618063984155724584_1088 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir55/subdir33/blk_1618063984155724584
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2227869664594473064_1020 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir39/blk_2227869664594473064
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2171210850407407022_1091 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir55/subdir35/blk_2171210850407407022
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2455440296791953441_1030 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir47/blk_2455440296791953441
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2388001064691005379_1051 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir53/blk_2388001064691005379
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2508651084571201544_1063 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir8/blk_2508651084571201544
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2758265278126528354_1047 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir59/blk_2758265278126528354
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2437773594957809922_1041 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir45/blk_2437773594957809922
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2988360729936221987_1053 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir1/blk_2988360729936221987
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2713481502210030747_1066 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir0/blk_2713481502210030747
    [junit] 2011-01-25 12:15:42,803 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2739612055494845999_1024 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir32/blk_2739612055494845999
    [junit] 2011-01-25 12:15:42,804 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3254936570187730926_1021 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir29/blk_3254936570187730926
    [junit] 2011-01-25 12:15:42,804 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3763067041105686630_1007 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir19/blk_3763067041105686630
    [junit] 2011-01-25 12:15:42,804 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3876411904082827669_1039 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir43/blk_3876411904082827669
    [junit] 2011-01-25 12:15:42,804 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3943746298619770439_1073 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir6/blk_3943746298619770439
    [junit] 2011-01-25 12:15:42,804 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4041444637223884935_1042 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir46/blk_4041444637223884935
    [junit] 2011-01-25 12:15:42,804 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4540719604872719765_1057 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir58/blk_4540719604872719765
    [junit] 2011-01-25 12:15:42,805 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4879067905492070089_1090 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir55/subdir35/blk_4879067905492070089
    [junit] 2011-01-25 12:15:42,805 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4995987663594631717_1043 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir47/blk_4995987663594631717
    [junit] 2011-01-25 12:15:42,805 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5199545676638496548_1025 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir33/blk_5199545676638496548
    [junit] 2011-01-25 12:15:42,805 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5211816703353563576_1056 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir57/blk_5211816703353563576
    [junit] 2011-01-25 12:15:42,805 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5615447476054691582_1170 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir55/subdir43/blk_5615447476054691582
    [junit] 2011-01-25 12:15:42,805 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5667523051340627108_1030 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir36/blk_5667523051340627108
    [junit] 2011-01-25 12:15:42,805 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6273218017907853637_1035 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir40/blk_6273218017907853637
    [junit] 2011-01-25 12:15:42,806 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6697547329860550044_1023 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/blk_6697547329860550044
    [junit] 2011-01-25 12:15:42,806 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6776047791403880196_1041 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir45/blk_6776047791403880196
    [junit] 2011-01-25 12:15:42,806 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7252115787493860044_1098 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir55/subdir41/blk_7252115787493860044
    [junit] 2011-01-25 12:15:42,806 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7914037756210194442_1060 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir60/blk_7914037756210194442
    [junit] 2011-01-25 12:15:42,806 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7917675275045429391_1079 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir11/blk_7917675275045429391
    [junit] 2011-01-25 12:15:42,806 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8505591169931231997_1031 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir37/blk_8505591169931231997
    [junit] 2011-01-25 12:15:42,806 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8650918501885291651_1014 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir24/blk_8650918501885291651
    [junit] 2011-01-25 12:15:42,807 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3014671737021628360_1082 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir23/blk_3014671737021628360
    [junit] 2011-01-25 12:15:42,807 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3889288170164364548_1002 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir25/blk_3889288170164364548
    [junit] 2011-01-25 12:15:42,807 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3940713466280337615_1009 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir30/blk_3940713466280337615
    [junit] 2011-01-25 12:15:42,807 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4106729813216391152_1002 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir25/blk_4106729813216391152
    [junit] 2011-01-25 12:15:42,807 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4558944545511671919_1085 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir27/subdir12/blk_4558944545511671919
    [junit] 2011-01-25 12:15:42,807 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5599342282372591556_1008 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir29/blk_5599342282372591556
    [junit] 2011-01-25 12:15:42,807 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7039449800417662976_1053 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir1/blk_7039449800417662976
    [junit] 2011-01-25 12:15:42,808 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7788940987783747456_1065 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir10/blk_7788940987783747456
    [junit] 2011-01-25 12:15:42,808 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7996808887604805331_1069 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir13/blk_7996808887604805331
    [junit] 2011-01-25 12:15:42,808 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8223711534857209888_1009 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir30/blk_8223711534857209888
    [junit] 2011-01-25 12:15:42,808 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8733945355303577987_1024 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir41/blk_8733945355303577987
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml was length 0

FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:318)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1501)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5trj2(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
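
Here the descriptor exhaustion surfaces one layer lower: Selector.open() itself consumes descriptors (on Linux, an epoll instance plus a wakeup pipe), so once leaked IPC servers have used up the ulimit, even creating the listener's selector fails. A hypothetical repro, not part of the test suite:

    import java.io.IOException;
    import java.nio.channels.Selector;
    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical repro: open selectors without ever closing them, as a
    // leaked server would, until the per-process descriptor limit is hit.
    public class SelectorLeakDemo {
        public static void main(String[] args) {
            List<Selector> leaked = new ArrayList<Selector>();
            try {
                while (true) {
                    leaked.add(Selector.open()); // never closed
                }
            } catch (IOException e) {
                // With the default ulimit this reports "Too many open files".
                System.out.println("failed after " + leaked.size()
                    + " selectors: " + e.getMessage());
            }
        }
    }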


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of e1576f22ac830ba4bec6c25d98d5bb7a but expecting 988fda9d1a63b68e26326afc49fc02f2

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of e1576f22ac830ba4bec6c25d98d5bb7a but expecting 988fda9d1a63b68e26326afc49fc02f2
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8h(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 561 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/561/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 631813 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-01-24 12:41:03,771 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 2
    [junit] 2011-01-24 12:41:03,838 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-24 12:41:03,873 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:43680, storageID=DS-1566108772-127.0.1.1-43680-1295872852763, infoPort=54806, ipcPort=36319):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-24 12:41:03,873 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 36319
    [junit] 2011-01-24 12:41:03,873 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-24 12:41:03,873 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-24 12:41:03,874 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-24 12:41:03,885 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-24 12:41:03,987 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2845)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-24 12:41:03,987 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-24 12:41:03,988 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(595)) - Number of transactions: 6 Total time for transactions(ms): 3Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 3 
    [junit] 2011-01-24 12:41:03,989 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 35205
    [junit] 2011-01-24 12:41:03,990 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 35205: exiting
    [junit] 2011-01-24 12:41:03,990 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 35205: exiting
    [junit] 2011-01-24 12:41:03,990 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 35205: exiting
    [junit] 2011-01-24 12:41:03,990 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 35205: exiting
    [junit] 2011-01-24 12:41:03,990 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 35205: exiting
    [junit] 2011-01-24 12:41:03,991 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 35205: exiting
    [junit] 2011-01-24 12:41:03,991 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 35205: exiting
    [junit] 2011-01-24 12:41:03,991 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 35205: exiting
    [junit] 2011-01-24 12:41:03,990 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 35205: exiting
    [junit] 2011-01-24 12:41:03,990 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 35205: exiting
    [junit] 2011-01-24 12:41:03,992 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 35205
    [junit] 2011-01-24 12:41:03,992 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.424 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 67 minutes 50 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1510)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5trj2(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 865ce6c6822ca057936060e9c65bc329 but expecting fdf1c0a8c27c00c2bd5f220ab2eea68d

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 865ce6c6822ca057936060e9c65bc329 but expecting fdf1c0a8c27c00c2bd5f220ab2eea68d
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8h(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 560 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/560/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 681479 lines...]
    [junit] 2011-01-23 12:44:57,128 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-23 12:44:57,129 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-01-23 12:44:57,230 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 57863
    [junit] 2011-01-23 12:44:57,231 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 57863: exiting
    [junit] 2011-01-23 12:44:57,231 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 57863
    [junit] 2011-01-23 12:44:57,231 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-23 12:44:57,231 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-23 12:44:57,232 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:50427, storageID=DS-1478913622-127.0.1.1-50427-1295786686189, infoPort=42276, ipcPort=57863):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-23 12:44:57,234 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-23 12:44:57,335 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-23 12:44:57,335 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:50427, storageID=DS-1478913622-127.0.1.1-50427-1295786686189, infoPort=42276, ipcPort=57863):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-23 12:44:57,335 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 57863
    [junit] 2011-01-23 12:44:57,335 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-23 12:44:57,336 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-23 12:44:57,336 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-23 12:44:57,336 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-23 12:44:57,438 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2845)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-23 12:44:57,438 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(595)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2 
    [junit] 2011-01-23 12:44:57,438 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-23 12:44:57,440 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 48893
    [junit] 2011-01-23 12:44:57,440 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 48893
    [junit] 2011-01-23 12:44:57,442 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 48893: exiting
    [junit] 2011-01-23 12:44:57,441 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 48893: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.859 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 71 minutes 6 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5trj2(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of abfff04cc862a7da918282f58438740b but expecting d8fa49fb922657b3bd422ab1c51b7ea2

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of abfff04cc862a7da918282f58438740b but expecting d8fa49fb922657b3bd422ab1c51b7ea2
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8h(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 559 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/559/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 649311 lines...]
    [junit] 2011-01-21 12:32:41,804 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-21 12:32:41,804 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(831)) - Shutting down DataNode 0
    [junit] 2011-01-21 12:32:41,906 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 60758
    [junit] 2011-01-21 12:32:41,906 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 60758: exiting
    [junit] 2011-01-21 12:32:41,907 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60758
    [junit] 2011-01-21 12:32:41,907 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-21 12:32:41,907 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:37057, storageID=DS-1481487460-127.0.1.1-37057-1295613150825, infoPort=56900, ipcPort=60758):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-21 12:32:41,907 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-21 12:32:41,909 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-21 12:32:42,010 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-21 12:32:42,010 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:37057, storageID=DS-1481487460-127.0.1.1-37057-1295613150825, infoPort=56900, ipcPort=60758):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-21 12:32:42,010 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 60758
    [junit] 2011-01-21 12:32:42,010 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-21 12:32:42,011 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-21 12:32:42,011 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-21 12:32:42,011 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-21 12:32:42,116 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2844)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-21 12:32:42,117 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2011-01-21 12:32:42,116 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-21 12:32:42,119 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 38925
    [junit] 2011-01-21 12:32:42,119 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 38925: exiting
    [junit] 2011-01-21 12:32:42,119 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 38925: exiting
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 38925: exiting
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 38925: exiting
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 38925: exiting
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 38925: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.615 sec
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 38925: exiting
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 38925: exiting
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 38925: exiting
    [junit] 2011-01-21 12:32:42,120 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38925
    [junit] 2011-01-21 12:32:42,119 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 38925: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:744: Tests failed!

Total time: 59 minutes 18 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
error occurred, see log above

Stack Trace:
junit.framework.AssertionFailedError: error occurred, see log above
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:391)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqzn(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:674)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:479)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:199)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:74)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:191)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
	at junit.framework.TestCase.runBare(TestCase.java:132)
	at junit.framework.TestResult$1.protect(TestResult.java:110)
	at junit.framework.TestResult.runProtected(TestResult.java:128)
	at junit.framework.TestResult.run(TestResult.java:113)
	at junit.framework.TestCase.run(TestCase.java:124)
	at junit.framework.TestSuite.runTest(TestSuite.java:232)
	at junit.framework.TestSuite.run(TestSuite.java:227)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
	... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:674)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:479)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:199)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:74)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:191)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:674)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:479)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:199)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:74)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:191)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
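
The error message and stack trace above share one root cause: file-descriptor exhaustion. The JVM cannot fork "/bin/ls" because the process has hit its open-file limit (errno 24, EMFILE on Linux), which points at a descriptor leak in the test run rather than a permissions problem. A minimal diagnostic sketch, assuming a Linux /proc filesystem; the class name is illustrative and not part of the test suite:

    import java.io.File;

    public class FdCount {
        public static void main(String[] args) {
            // Each entry under /proc/self/fd is one descriptor held by this JVM.
            // (Listing the directory briefly opens one more fd itself.)
            File[] fds = new File("/proc/self/fd").listFiles();
            int count = (fds == null) ? -1 : fds.length;  // -1: /proc unavailable
            System.out.println("open fds: " + count);
        }
    }

Logging this count between test cases would show a leak growing long before the fork failure above fires.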


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:466)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:199)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:74)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:191)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 60282180484a5c0716bd78ff7e0393ee but expecting 833c475fe39fecac0a6e749ce1e8af61

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 60282180484a5c0716bd78ff7e0393ee but expecting 833c475fe39fecac0a6e749ce1e8af61
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tma(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 558 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/558/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 666794 lines...]
    [junit] 2011-01-20 12:32:12,854 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-20 12:32:12,854 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(831)) - Shutting down DataNode 0
    [junit] 2011-01-20 12:32:12,956 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37101
    [junit] 2011-01-20 12:32:12,956 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 37101: exiting
    [junit] 2011-01-20 12:32:12,956 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37101
    [junit] 2011-01-20 12:32:12,956 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-20 12:32:12,957 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-20 12:32:12,957 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:44942, storageID=DS-291946037-127.0.1.1-44942-1295526721841, infoPort=37284, ipcPort=37101):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-20 12:32:12,959 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-20 12:32:13,059 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-20 12:32:13,060 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:44942, storageID=DS-291946037-127.0.1.1-44942-1295526721841, infoPort=37284, ipcPort=37101):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-20 12:32:13,060 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37101
    [junit] 2011-01-20 12:32:13,060 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-20 12:32:13,061 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-20 12:32:13,061 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-20 12:32:13,061 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-20 12:32:13,163 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2844)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-20 12:32:13,163 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 10 2 
    [junit] 2011-01-20 12:32:13,164 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-20 12:32:13,165 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 60156
    [junit] 2011-01-20 12:32:13,165 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 60156: exiting
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 60156: exiting
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 60156: exiting
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 60156: exiting
    [junit] 2011-01-20 12:32:13,167 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 60156: exiting
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.507 sec
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 60156: exiting
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 60156: exiting
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60156
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 60156: exiting
    [junit] 2011-01-20 12:32:13,166 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 60156: exiting
    [junit] 2011-01-20 12:32:13,168 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 60156: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:744: Tests failed!

Total time: 58 minutes 54 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:318)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1501)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:674)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:479)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:199)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:74)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:191)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:466)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:199)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:74)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:191)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:466)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:199)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:74)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:191)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
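
Both "already locked" failures above describe the same collision: an earlier MiniDFSCluster (or a NameNode leaked by a previous case) still holds the in-use lock under the shared build/test/data/dfs/name1 path when setUp tries to format that directory again. A minimal isolation sketch, assuming the trunk-era convention that MiniDFSCluster derives its storage paths from the "test.build.data" system property; the property name and the Builder calls should be checked against the branch in question:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class IsolatedCluster {
        public static void main(String[] args) throws Exception {
            // Assumption: MiniDFSCluster of this vintage roots its dfs/name* and
            // dfs/data* directories at ${test.build.data}, so a unique base
            // directory per run cannot collide with a stale in_use.lock.
            System.setProperty("test.build.data",
                    "build/test/data/" + System.nanoTime());
            MiniDFSCluster cluster =
                    new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
            try {
                System.out.println("cluster up: " + cluster.getFileSystem().getUri());
            } finally {
                cluster.shutdown();  // releases the storage locks for the next run
            }
        }
    }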


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 5d56d76f2edb6df1d35f4b0ebc6a6454 but expecting 55b614bdf02a63d831043b08e74543dc

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 5d56d76f2edb6df1d35f4b0ebc6a6454 but expecting 55b614bdf02a63d831043b08e74543dc
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tme(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
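
Here the checkpoint aborts because the fsimage the SecondaryNameNode assembled does not hash to the MD5 the NameNode advertised for it. A minimal sketch for checking a suspect image by hand with only the JDK, so the result can be compared against the "expecting" digest in the message; the file path comes from the failure output, and the class name is illustrative:

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.MessageDigest;

    public class Md5OfFile {
        public static void main(String[] args) throws Exception {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            InputStream in = new FileInputStream(args[0]);  // e.g. the secondary/current/fsimage path above
            try {
                byte[] buf = new byte[8192];
                int n;
                while ((n = in.read(buf)) != -1) {
                    md5.update(buf, 0, n);  // digest the file as it streams
                }
            } finally {
                in.close();
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md5.digest()) {
                hex.append(String.format("%02x", b));
            }
            System.out.println(hex);  // compare by eye with the expected digest
        }
    }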




Hadoop-Hdfs-trunk - Build # 557 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/557/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 613732 lines...]
    [junit] 2011-01-19 12:17:20,817 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_4991652345072530302_1080 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir47/blk_4991652345072530302 for deletion
    [junit] 2011-01-19 12:17:20,817 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4930302726818238705_1080 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir48/blk_4930302726818238705
    [junit] 2011-01-19 12:17:20,817 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5409886590221017837_1013 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir13/blk_5409886590221017837 for deletion
    [junit] 2011-01-19 12:17:20,817 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4991652345072530302_1080 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir47/blk_4991652345072530302
    [junit] 2011-01-19 12:17:20,818 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5501672040967286899_1013 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir13/blk_5501672040967286899 for deletion
    [junit] 2011-01-19 12:17:20,818 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5409886590221017837_1013 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir13/blk_5409886590221017837
    [junit] 2011-01-19 12:17:20,818 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5501672040967286899_1013 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir13/blk_5501672040967286899
    [junit] 2011-01-19 12:17:20,818 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5757636479118090014_1076 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir44/blk_5757636479118090014 for deletion
    [junit] 2011-01-19 12:17:20,818 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5811111900818128598_1060 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir50/blk_5811111900818128598 for deletion
    [junit] 2011-01-19 12:17:20,818 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5757636479118090014_1076 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir44/blk_5757636479118090014
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5811111900818128598_1060 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir50/blk_5811111900818128598
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5919253124213601097_1048 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir22/blk_5919253124213601097 for deletion
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6135514938522389004_1024 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir4/blk_6135514938522389004 for deletion
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5919253124213601097_1048 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir22/blk_5919253124213601097
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6153785705450926754_1078 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir46/blk_6153785705450926754 for deletion
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6135514938522389004_1024 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir4/blk_6135514938522389004
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6269264091216094863_1040 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir16/blk_6269264091216094863 for deletion
    [junit] 2011-01-19 12:17:20,819 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6153785705450926754_1078 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir46/blk_6153785705450926754
    [junit] 2011-01-19 12:17:20,820 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6314037281295753989_1161 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir38/subdir6/blk_6314037281295753989 for deletion
    [junit] 2011-01-19 12:17:20,820 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6269264091216094863_1040 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir16/blk_6269264091216094863
    [junit] 2011-01-19 12:17:20,820 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6346730224261856352_1036 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir32/blk_6346730224261856352 for deletion
    [junit] 2011-01-19 12:17:20,820 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6314037281295753989_1161 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir38/subdir6/blk_6314037281295753989
    [junit] 2011-01-19 12:17:20,820 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6346730224261856352_1036 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir32/blk_6346730224261856352
    [junit] 2011-01-19 12:17:20,820 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6470085134963045709_1039 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir16/blk_6470085134963045709 for deletion
    [junit] 2011-01-19 12:17:20,821 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6470085134963045709_1039 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir16/blk_6470085134963045709
    [junit] 2011-01-19 12:17:20,821 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6568120848106210207_1008 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir10/blk_6568120848106210207 for deletion
    [junit] 2011-01-19 12:17:20,821 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6800761260689123813_1022 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir2/blk_6800761260689123813 for deletion
    [junit] 2011-01-19 12:17:20,821 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6568120848106210207_1008 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir10/blk_6568120848106210207
    [junit] 2011-01-19 12:17:20,821 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7066208572186316257_1063 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir34/blk_7066208572186316257 for deletion
    [junit] 2011-01-19 12:17:20,821 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6800761260689123813_1022 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir2/blk_6800761260689123813
    [junit] 2011-01-19 12:17:20,821 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7168893440597103146_1077 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir63/blk_7168893440597103146 for deletion
    [junit] 2011-01-19 12:17:20,822 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7066208572186316257_1063 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir34/blk_7066208572186316257
    [junit] 2011-01-19 12:17:20,822 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7168893440597103146_1077 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir63/blk_7168893440597103146
    [junit] 2011-01-19 12:17:20,822 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7183651146678087919_1074 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir43/blk_7183651146678087919 for deletion
    [junit] 2011-01-19 12:17:20,822 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7229036321995578940_1022 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir20/blk_7229036321995578940 for deletion
    [junit] 2011-01-19 12:17:20,822 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7183651146678087919_1074 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir43/blk_7183651146678087919
    [junit] 2011-01-19 12:17:20,822 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7229036321995578940_1022 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir20/blk_7229036321995578940
    [junit] 2011-01-19 12:17:20,822 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7852128987396660278_1095 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/subdir32/blk_7852128987396660278 for deletion
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8106104073285198962_1100 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir38/subdir5/blk_8106104073285198962 for deletion
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7852128987396660278_1095 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/subdir32/blk_7852128987396660278
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8106104073285198962_1100 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir38/subdir5/blk_8106104073285198962
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8146550930561649748_1071 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir58/blk_8146550930561649748 for deletion
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8912575372540419822_1020 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir1/blk_8912575372540419822 for deletion
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8146550930561649748_1071 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir58/blk_8146550930561649748
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_9124096911676163411_1049 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir23/blk_9124096911676163411 for deletion
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8912575372540419822_1020 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir1/blk_8912575372540419822
    [junit] 2011-01-19 12:17:20,823 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_9212301245574749231_1053 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir44/blk_9212301245574749231 for deletion
    [junit] 2011-01-19 12:17:20,824 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_9124096911676163411_1049 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir23/blk_9124096911676163411
    [junit] 2011-01-19 12:17:20,824 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_9212301245574749231_1053 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir44/blk_9212301245574749231
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml was length 0

FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of d51fcf122acc432fe027fdf704c5e102 but expecting 11a91bb8226f92270f50664ee896bf64

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of d51fcf122acc432fe027fdf704c5e102 but expecting 11a91bb8226f92270f50664ee896bf64
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tmc(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 556 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/556/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 54365 lines...]
    [junit] 2011-01-19 11:29:24,127 INFO  ipc.Server (Server.java:run(1368)) - IPC Server handler 1 on 32993: starting
    [junit] 2011-01-19 11:29:24,126 INFO  ipc.Server (Server.java:run(1368)) - IPC Server handler 0 on 32993: starting
    [junit] 2011-01-19 11:29:24,128 INFO  ipc.Server (Server.java:run(1368)) - IPC Server handler 2 on 32993: starting
    [junit] 2011-01-19 11:29:24,128 INFO  datanode.DataNode (DataNode.java:offerService(904)) - using BLOCKREPORT_INTERVAL of 21600000msec Initial delay: 0msec
    [junit] 2011-01-19 11:29:24,141 INFO  datanode.DataNode (DataNode.java:blockReport(1143)) - BlockReport of 0 blocks got processed in 8 msecs
    [junit] 2011-01-19 11:29:24,141 INFO  datanode.DataNode (DataNode.java:offerService(946)) - Starting Periodic block scanner.
    [junit] Starting DataNode 1 with dfs.datanode.data.dir: file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/,file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data4/
    [junit] 2011-01-19 11:30:27,271 INFO  security.UserGroupInformation (UserGroupInformation.java:initUGI(259)) - JAAS Configuration already set up for Hadoop, not re-installing.
    [junit] 2011-01-19 11:31:09,410 INFO  datanode.DataNode (DataNode.java:registerMXBean(536)) - DataNode MXBean already registered
    [junit] 2011-01-19 11:31:09,411 INFO  datanode.DataNode (DataNode.java:initDataXceiver(472)) - Opened info server at 55692
    [junit] 2011-01-19 11:31:09,411 INFO  datanode.DataNode (DataXceiverServer.java:<init>(77)) - Balancing bandwith is 1048576 bytes/s
    [junit] 2011-01-19 11:31:09,413 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3 is not formatted.
    [junit] 2011-01-19 11:31:09,414 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-19 11:31:09,416 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data4 is not formatted.
    [junit] 2011-01-19 11:31:09,416 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-19 11:31:51,503 INFO  datanode.DataNode (FSDataset.java:registerMBean(1772)) - Registered FSDatasetStatusMBean
    [junit] 2011-01-19 11:31:51,503 INFO  datanode.DirectoryScanner (DirectoryScanner.java:<init>(149)) - scan starts at 1295446363503 with interval 21600000
    [junit] 2011-01-19 11:31:51,505 INFO  http.HttpServer (HttpServer.java:addGlobalFilter(409)) - Added global filtersafety (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
    [junit] 2011-01-19 11:31:51,506 INFO  http.HttpServer (HttpServer.java:start(579)) - Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 0
    [junit] 2011-01-19 11:31:51,507 INFO  http.HttpServer (HttpServer.java:start(584)) - listener.getLocalPort() returned 44075 webServer.getConnectors()[0].getLocalPort() returned 44075
    [junit] 2011-01-19 11:31:51,507 INFO  http.HttpServer (HttpServer.java:start(617)) - Jetty bound to port 44075
    [junit] 2011-01-19 11:31:51,508 INFO  mortbay.log (?:invoke0(?)) - jetty-6.1.14
    [junit] 2011-01-19 11:31:51,626 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:44075
    [junit] 2011-01-19 11:31:51,627 INFO  jvm.JvmMetrics (JvmMetrics.java:init(71)) - Cannot initialize JVM Metrics with processName=DataNode, sessionId=null - already initialized
    [junit] 2011-01-19 11:31:51,629 INFO  ipc.Server (Server.java:run(338)) - Starting SocketReader
    [junit] 2011-01-19 11:31:51,629 INFO  metrics.RpcMetrics (RpcMetrics.java:<init>(63)) - Initializing RPC Metrics with hostName=DataNode, port=49250
    [junit] 2011-01-19 11:31:51,630 INFO  metrics.RpcDetailedMetrics (RpcDetailedMetrics.java:<init>(57)) - Initializing RPC Metrics with hostName=DataNode, port=49250
    [junit] 2011-01-19 11:31:51,631 INFO  datanode.DataNode (DataNode.java:initIpcServer(432)) - dnRegistration = DatanodeRegistration(h2.grid.sp2.yahoo.net:55692, storageID=, infoPort=44075, ipcPort=49250)
    [junit] 2011-01-19 11:31:51,633 INFO  hdfs.StateChange (FSNamesystem.java:registerDatanode(2517)) - BLOCK* NameSystem.registerDatanode: node registration from 127.0.0.1:55692 storage DS-1269954937-127.0.1.1-55692-1295436711632
    [junit] 2011-01-19 11:31:51,633 INFO  net.NetworkTopology (NetworkTopology.java:add(331)) - Adding a new node: /default-rack/127.0.0.1:55692
    [junit] 2011-01-19 11:31:51,637 INFO  datanode.DataNode (DataNode.java:register(714)) - New storage id DS-1269954937-127.0.1.1-55692-1295436711632 is assigned to data-node 127.0.0.1:55692
    [junit] 2011-01-19 11:31:51,651 INFO  datanode.DataNode (DataNode.java:run(1438)) - DatanodeRegistration(127.0.0.1:55692, storageID=DS-1269954937-127.0.1.1-55692-1295436711632, infoPort=44075, ipcPort=49250)In DataNode.run, data = FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data4/current/finalized'}
    [junit] 2011-01-19 11:31:51,653 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] Starting DataNode 2 with dfs.datanode.data.dir: file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data5/,file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data6/
    [junit] 2011-01-19 11:31:51,654 INFO  security.UserGroupInformation (UserGroupInformation.java:initUGI(259)) - JAAS Configuration already set up for Hadoop, not re-installing.
    [junit] 2011-01-19 11:31:51,662 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 49250: starting
    [junit] 2011-01-19 11:31:51,669 INFO  ipc.Server (Server.java:run(1368)) - IPC Server handler 2 on 49250: starting
    [junit] 2011-01-19 11:31:51,669 INFO  ipc.Server (Server.java:run(1368)) - IPC Server handler 0 on 49250: starting
    [junit] 2011-01-19 11:31:51,668 INFO  ipc.Server (Server.java:run(1368)) - IPC Server handler 1 on 49250: starting
    [junit] 2011-01-19 11:31:51,672 INFO  datanode.DataNode (DataNode.java:offerService(904)) - using BLOCKREPORT_INTERVAL of 21600000msec Initial delay: 0msec
    [junit] 2011-01-19 11:31:51,687 INFO  datanode.DataNode (DataNode.java:blockReport(1143)) - BlockReport of 0 blocks got processed in 2 msecs
    [junit] 2011-01-19 11:31:51,688 INFO  datanode.DataNode (DataNode.java:offerService(946)) - Starting Periodic block scanner.
    [junit] 2011-01-19 11:32:33,816 INFO  datanode.DataNode (DataNode.java:registerMXBean(536)) - DataNode MXBean already registered
    [junit] 2011-01-19 11:32:33,817 INFO  datanode.DataNode (DataNode.java:initDataXceiver(472)) - Opened info server at 34155
    [junit] 2011-01-19 11:32:33,818 INFO  datanode.DataNode (DataXceiverServer.java:<init>(77)) - Balancing bandwith is 1048576 bytes/s
    [junit] 2011-01-19 11:32:33,820 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data5 is not formatted.
    [junit] 2011-01-19 11:32:33,820 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-19 11:32:33,822 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data6 is not formatted.
    [junit] 2011-01-19 11:32:33,823 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
11 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestDFSShell.testErrOutPut

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.testUpgradeFromImage

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestDistributedFileSystem.testAllWithDualPort

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend.testComplexFlush

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend2.testComplexAppend

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend3.testAppendToPartialChunk

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestHDFSTrash.testTrashEmptier

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: null
	at org.apache.hadoop.fs.TestTrash.testTrashEmptier(TestTrash.java:460)
	at junit.extensions.TestDecorator.basicRun(TestDecorator.java:24)
	at junit.extensions.TestSetup$1.protect(TestSetup.java:23)
	at junit.extensions.TestSetup.run(TestSetup.java:27)


REGRESSION:  org.apache.hadoop.hdfs.TestPread.testPreadDFSSimulated

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestQuota.testMultipleFilesSmallerThanOneBlock

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.TestRestartDFS.testRestartDualPortDFS

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.
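
Every regression above is a harness-level timeout: Ant killed the forked JUnit JVM from outside, which is why each report can say only that a timeout occurred, not where the test was stuck. A hedged illustration of the alternative, a per-test timeout in JUnit 4 style, which typically fails with the hung thread's own stack trace; class, method, and limit are all illustrative:

    import org.junit.Test;

    public class TimeoutExample {
        // If the body runs past the limit, JUnit fails this one test with the
        // stuck thread's stack trace instead of aborting the whole forked JVM.
        @Test(timeout = 120000)  // milliseconds; value is illustrative
        public void slowOperation() throws Exception {
            Thread.sleep(1000);  // stand-in for the real work under test
        }
    }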


FAILED:  TEST-org.apache.hadoop.hdfs.server.datanode.TestDiskError.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.server.datanode.TestDiskError.xml was length 0



Hadoop-Hdfs-trunk - Build # 555 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/555/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 339322 lines...]
    [junit] 2011-01-17 12:08:32,623 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:60114, storageID=DS-1940478843-127.0.1.1-60114-1295265506409, infoPort=53064, ipcPort=57210):Failed to transfer blk_8892008087116593488_1018 to 127.0.0.1:43098 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-17 12:08:34,310 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(935)) - BLOCK* ask 127.0.0.1:60114 to replicate blk_9192818613191395145_1016 to datanode(s) 127.0.0.1:43098
    [junit] 2011-01-17 12:08:35,623 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:60114, storageID=DS-1940478843-127.0.1.1-60114-1295265506409, infoPort=53064, ipcPort=57210) Starting thread to transfer block blk_9192818613191395145_1016 to 127.0.0.1:43098 
    [junit] 2011-01-17 12:08:35,624 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:60114, storageID=DS-1940478843-127.0.1.1-60114-1295265506409, infoPort=53064, ipcPort=57210):Failed to transfer blk_9192818613191395145_1016 to 127.0.0.1:43098 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-17 12:12:09,502 DEBUG datanode.DataNode (BlockSender.java:<init>(142)) - block=blk_-3499279293042666336_1013, replica=FinalizedReplica, blk_-3499279293042666336_1013, FINALIZED
    [junit]   getNumBytes()     = 57
    [junit]   getBytesOnDisk()  = 57
    [junit]   getVisibleLength()= 57
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized/blk_-3499279293042666336
    [junit]   unlinked=false
    [junit] 2011-01-17 12:12:09,503 DEBUG datanode.DataNode (BlockSender.java:<init>(237)) - replica=FinalizedReplica, blk_-3499279293042666336_1013, FINALIZED
    [junit]   getNumBytes()     = 57
    [junit]   getBytesOnDisk()  = 57
    [junit]   getVisibleLength()= 57
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data3/current/finalized/blk_-3499279293042666336
    [junit]   unlinked=false
    [junit] 2011-01-17 12:12:09,503 INFO  datanode.DataBlockScanner (DataBlockScanner.java:verifyBlock(447)) - Verification succeeded for blk_-3499279293042666336_1013
    [junit] 2011-01-17 12:12:37,252 DEBUG datanode.DataNode (BlockSender.java:<init>(142)) - block=blk_6232795097492429815_1020, replica=FinalizedReplica, blk_6232795097492429815_1020, FINALIZED
    [junit]   getNumBytes()     = 57
    [junit]   getBytesOnDisk()  = 57
    [junit]   getVisibleLength()= 57
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/blk_6232795097492429815
    [junit]   unlinked=false
    [junit] 2011-01-17 12:12:37,252 DEBUG datanode.DataNode (BlockSender.java:<init>(237)) - replica=FinalizedReplica, blk_6232795097492429815_1020, FINALIZED
    [junit]   getNumBytes()     = 57
    [junit]   getBytesOnDisk()  = 57
    [junit]   getVisibleLength()= 57
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/blk_6232795097492429815
    [junit]   unlinked=false
    [junit] 2011-01-17 12:12:37,252 INFO  datanode.DataBlockScanner (DataBlockScanner.java:verifyBlock(447)) - Verification succeeded for blk_6232795097492429815_1020
Build timed out. Aborting
/tmp/hudson8622830351260089278.sh: line 2: 14997 Terminated              bash ${WORKSPACE}/nightly/commitBuild.sh
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock

Error Message:
port out of range:-1

Stack Trace:
java.lang.IllegalArgumentException: port out of range:-1
	at java.net.InetSocketAddress.<init>(InetSocketAddress.java:118)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:521)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:461)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1138)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:461)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.activate(NameNode.java:405)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:389)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:578)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:571)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1534)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:461)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_21z1ppcy4i(TestFileAppend4.java:151)
	at org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock(TestFileAppend4.java:150)
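
The trace shows NameNode.startHttpServer constructing a java.net.InetSocketAddress with port -1, which the JDK rejects for any port outside 0..65535 (here, most likely a port that was never resolved). A minimal reproduction of the JDK behavior, independent of Hadoop:

    import java.net.InetSocketAddress;

    public class PortRangeDemo {
        public static void main(String[] args) {
            // Throws java.lang.IllegalArgumentException: port out of range:-1,
            // the same error reported for testRecoverFinalizedBlock above.
            new InetSocketAddress("127.0.0.1", -1);
        }
    }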


REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_269ddf9y5m(TestFileAppend4.java:222)
	at org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile(TestFileAppend4.java:221)
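
Storage$StorageDirectory.lock() holds a file lock on the name directory, so this failure means an earlier MiniDFSCluster against the same build/test/data/dfs/name1 was never shut down, consistent with the previous test in this class having already failed. The usual guard is to release the cluster unconditionally in tearDown; a sketch in the JUnit 3 style these tests use (class name hypothetical):

    import junit.framework.TestCase;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ExampleClusterTest extends TestCase {
        private MiniDFSCluster cluster;

        protected void setUp() throws Exception {
            cluster = new MiniDFSCluster.Builder(new Configuration()).build();
        }

        // Runs even when the test body throws, releasing the storage
        // directory locks so the next test can format dfs/name1 again.
        protected void tearDown() throws Exception {
            if (cluster != null) {
                cluster.shutdown();
            }
        }
    }

With that in place, one failed test would no longer poison every later test in the same class.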


FAILED:  TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml was length 0

FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files  at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)  at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)  at org.apache.hadoop.util.Shell.run(Shell.java:188)  at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)  at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)  at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)  at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)  at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)  at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)  at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)  at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)  at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)  at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)  at junit.framework.TestCase.runBare(TestCase.java:132)  at junit.framework.TestResult$1.protect(TestResult.java:110)  at junit.framework.TestResult.runProtected(TestResult.java:128)  at junit.framework.TestResult.run(TestResult.java:113)  at junit.framework.TestCase.run(TestCase.java:124)  at junit.framework.TestSuite.runTest(TestSuite.java:232)  at junit.framework.TestSuite.run(TestSuite.java:227)  at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)  at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768) Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files  at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)  at java.lang.ProcessImpl.start(ProcessImpl.java:65)  at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)  ... 34 more 

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
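
error=24 (EMFILE) means the forked test JVM exhausted its file-descriptor limit, so it cannot even fork /bin/ls to read permissions. Which descriptors leaked is not shown here, but in stress tests like these the usual suspects are unclosed streams and client sockets. The standard defensive pattern, as a sketch (helper class and method are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FdSafeRead {
        static int readFirstByte(FileSystem fs, Path p) throws IOException {
            FSDataInputStream in = fs.open(p);
            try {
                return in.read();
            } finally {
                in.close(); // without this, every call leaks a descriptor
            }
        }
    }

Raising the slave's ulimit -n only hides the symptom; closing the leak removes it.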


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqy9(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 9654c42cd5aa85110f9454ba70963c15 but expecting 0154686689e7f6f4b63e57509bd7f6e0

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 9654c42cd5aa85110f9454ba70963c15 but expecting 0154686689e7f6f4b63e57509bd7f6e0
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkw(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
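
FSImage.loadFSImage recomputes the image file's MD5 and refuses to load when it differs from the digest recorded at save time, which is what tripped here after the checkpoint merge. The comparison is of this general shape; a sketch only, not the actual FSImage code:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class Md5Check {
        static byte[] md5Of(String path)
                throws IOException, NoSuchAlgorithmException {
            MessageDigest md = MessageDigest.getInstance("MD5");
            DigestInputStream in =
                new DigestInputStream(new FileInputStream(path), md);
            try {
                byte[] buf = new byte[8192];
                // DigestInputStream feeds every byte read into the digest
                while (in.read(buf) != -1) { }
            } finally {
                in.close();
            }
            return md.digest(); // compare with the checksum stored on save
        }
    }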




Hadoop-Hdfs-trunk - Build # 554 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/554/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 646635 lines...]
    [junit] 2011-01-16 13:42:41,136 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-16 13:42:41,137 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-16 13:42:41,137 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2011-01-16 13:42:41,239 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40365
    [junit] 2011-01-16 13:42:41,239 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 40365: exiting
    [junit] 2011-01-16 13:42:41,240 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-16 13:42:41,240 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:48987, storageID=DS-875787204-127.0.1.1-48987-1295185350124, infoPort=33472, ipcPort=40365):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-16 13:42:41,239 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 40365
    [junit] 2011-01-16 13:42:41,240 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-16 13:42:41,341 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-16 13:42:41,341 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:48987, storageID=DS-875787204-127.0.1.1-48987-1295185350124, infoPort=33472, ipcPort=40365):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-16 13:42:41,342 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40365
    [junit] 2011-01-16 13:42:41,342 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-16 13:42:41,342 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-16 13:42:41,342 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-16 13:42:41,343 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-16 13:42:41,445 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2828)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-16 13:42:41,445 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-16 13:42:41,445 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 7 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 2
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 44704
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 44704: exiting
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 44704: exiting
    [junit] 2011-01-16 13:42:41,449 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 44704: exiting
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 44704: exiting
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 44704: exiting
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 44704: exiting
    [junit] 2011-01-16 13:42:41,449 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-16 13:42:41,449 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 44704
    [junit] 2011-01-16 13:42:41,449 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 44704: exiting
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 44704: exiting
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 44704: exiting
    [junit] 2011-01-16 13:42:41,448 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 44704: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.754 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:744: Tests failed!

Total time: 128 minutes 0 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure
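
One note on the console excerpt above: the DataXceiveServer warning with java.nio.channels.AsynchronousCloseException is ordinary shutdown noise, not a failure. Closing a ServerSocketChannel from another thread makes a blocked accept() throw exactly that exception, which appears to be how the DataNode's acceptor thread is stopped during MiniDFSCluster teardown. A standalone demonstration of the JDK behavior (hypothetical snippet, independent of Hadoop):

    import java.net.InetSocketAddress;
    import java.nio.channels.AsynchronousCloseException;
    import java.nio.channels.ServerSocketChannel;

    public class AcceptCloseDemo {
        public static void main(String[] args) throws Exception {
            final ServerSocketChannel ch = ServerSocketChannel.open();
            ch.socket().bind(new InetSocketAddress(0));
            new Thread() {
                public void run() {
                    try {
                        Thread.sleep(100);
                        ch.close(); // close from another thread...
                    } catch (Exception ignored) {
                    }
                }
            }.start();
            try {
                ch.accept(); // ...and the blocked accept() is broken out of
            } catch (AsynchronousCloseException expected) {
                System.out.println("accept() ended by close(), as in the log");
            }
        }
    }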



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
REGRESSION:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testErrorReplicas

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files  at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)  at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)  at org.apache.hadoop.util.Shell.run(Shell.java:188)  at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)  at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)  at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)  at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)  at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)  at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)  at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)  at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)  at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)  at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)  at junit.framework.TestCase.runBare(TestCase.java:132)  at junit.framework.TestResult$1.protect(TestResult.java:110)  at junit.framework.TestResult.runProtected(TestResult.java:128)  at junit.framework.TestResult.run(TestResult.java:113)  at junit.framework.TestCase.run(TestCase.java:124)  at junit.framework.TestSuite.runTest(TestSuite.java:232)  at junit.framework.TestSuite.run(TestSuite.java:227)  at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)  at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768) Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files  at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)  at java.lang.ProcessImpl.start(ProcessImpl.java:65)  at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)  ... 34 more 

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqy9(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c5c77a139e29f513737315af7f42ec2b but expecting 3c62c5aab4ee685a8542a60b95fbeb69

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c5c77a139e29f513737315af7f42ec2b but expecting 3c62c5aab4ee685a8542a60b95fbeb69
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkw(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 553 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/553/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 642375 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-01-15 12:42:07,178 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-15 12:42:07,278 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-15 12:42:07,279 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:39238, storageID=DS-234553502-127.0.1.1-39238-1295095316012, infoPort=55922, ipcPort=37784):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-15 12:42:07,279 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 37784
    [junit] 2011-01-15 12:42:07,279 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-15 12:42:07,279 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-15 12:42:07,280 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-15 12:42:07,280 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-15 12:42:07,384 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2828)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-15 12:42:07,384 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-15 12:42:07,385 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2
    [junit] 2011-01-15 12:42:07,386 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 39706
    [junit] 2011-01-15 12:42:07,386 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 39706: exiting
    [junit] 2011-01-15 12:42:07,386 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 39706: exiting
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 39706: exiting
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 39706: exiting
    [junit] 2011-01-15 12:42:07,388 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 39706: exiting
    [junit] 2011-01-15 12:42:07,388 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 39706: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.759 sec
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 39706
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 39706: exiting
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 39706: exiting
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 39706: exiting
    [junit] 2011-01-15 12:42:07,387 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 39706: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 69 minutes 24 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of f9b6bde45cd1101ea2571eb68070707b but expecting 2970f17e638fc2cf7e05c80ceba530c5

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of f9b6bde45cd1101ea2571eb68070707b but expecting 2970f17e638fc2cf7e05c80ceba530c5
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkw(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 552 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/552/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 667252 lines...]
    [junit] 2011-01-14 12:42:57,134 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-14 12:42:57,147 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-14 12:42:57,148 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2011-01-14 12:42:57,260 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 59154
    [junit] 2011-01-14 12:42:57,260 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59154: exiting
    [junit] 2011-01-14 12:42:57,260 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59154
    [junit] 2011-01-14 12:42:57,260 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:49946, storageID=DS-951812744-127.0.1.1-49946-1295008966339, infoPort=45554, ipcPort=59154):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-14 12:42:57,260 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-14 12:42:57,260 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-14 12:42:57,361 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-14 12:42:57,362 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:49946, storageID=DS-951812744-127.0.1.1-49946-1295008966339, infoPort=45554, ipcPort=59154):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-14 12:42:57,362 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 59154
    [junit] 2011-01-14 12:42:57,362 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-14 12:42:57,363 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-14 12:42:57,363 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-14 12:42:57,363 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-14 12:42:57,467 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2828)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-14 12:42:57,468 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-14 12:42:57,468 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3
    [junit] 2011-01-14 12:42:57,469 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 60808
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60808
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 60808: exiting
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 60808: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.693 sec
    [junit] 2011-01-14 12:42:57,470 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 60808: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 69 minutes 32 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.testListCorruptFileBlocksInSafeMode

Error Message:
Namenode is not in safe mode

Stack Trace:
junit.framework.AssertionFailedError: Namenode is not in safe mode
	at org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.__CLR3_0_2mvj3yzov5(TestListCorruptFileBlocks.java:241)
	at org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.testListCorruptFileBlocksInSafeMode(TestListCorruptFileBlocks.java:132)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
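
[Editor's note] The recurring "directory is already locked" setUp failures mean the NameNode storage lock on build/test/data/dfs/name1 is still held when the next test case tries to format it, i.e. a MiniDFSCluster from an earlier case was never shut down. For background, here is a minimal, hedged sketch of the file-lock scheme that Storage$StorageDirectory.lock() (visible in the trace above) uses; the lock-file name "in_use.lock" matches HDFS, but the class and error handling below are simplified illustrations, not the real implementation:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    // Sketch of file-lock based storage-directory locking.
    public class StorageDirLock {
      private final File lockFile;
      private RandomAccessFile raf;
      private FileLock lock;

      public StorageDirLock(File storageDir) {
        this.lockFile = new File(storageDir, "in_use.lock");
      }

      public void lock() throws IOException {
        raf = new RandomAccessFile(lockFile, "rws");
        try {
          lock = raf.getChannel().tryLock();
        } catch (OverlappingFileLockException e) {
          // Another thread in this JVM already holds the lock, e.g. a
          // MiniDFSCluster from a previous test that was never shut down.
          lock = null;
        }
        if (lock == null) {
          raf.close();
          throw new IOException("Cannot lock storage " + lockFile.getParent()
              + ". The directory is already locked.");
        }
      }

      public void unlock() throws IOException {
        if (lock != null) lock.release();
        if (raf != null) raf.close();
        lockFile.delete();
      }
    }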


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
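
[Editor's note] The epoll "Too many open files" error shows the Hudson slave's per-process file-descriptor limit was exhausted before the DataNode could even open its RPC selector, again pointing at resources leaking across test cases. A small diagnostic sketch that could be dropped into a suspect test run; it assumes a Sun/OpenJDK JVM on Unix, where the platform MXBean exposes descriptor counts:

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;

    // Diagnostic sketch: print open vs. maximum file descriptors, e.g.
    // before each MiniDFSCluster start, to spot a leak trend.
    public class FdUsage {
      public static void main(String[] args) {
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
          com.sun.management.UnixOperatingSystemMXBean unix =
              (com.sun.management.UnixOperatingSystemMXBean) os;
          System.out.println("open fds = " + unix.getOpenFileDescriptorCount()
              + " / max = " + unix.getMaxFileDescriptorCount());
        } else {
          System.out.println("fd counts not available on this platform");
        }
      }
    }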


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 61d49c6529efde504086234d635bb763 but expecting 012735fb8eaf59302e97c2cd1f88a8d2

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 61d49c6529efde504086234d635bb763 but expecting 012735fb8eaf59302e97c2cd1f88a8d2
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkw(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
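
[Editor's note] The TestStorageRestore failure is the fsimage MD5 self-check: the digest recorded for the checkpoint does not match the bytes read back from disk, so FSImage.loadFSImage rejects the file as corrupt. A minimal sketch of that style of validation using java.security.MessageDigest; the verify() helper and its expected-digest argument are illustrative stand-ins, not the real FSImage metadata plumbing:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    // Sketch: compute a file's MD5 and fail on mismatch, the way the
    // image loader does.
    public class Md5Check {
      static String md5Of(String path) throws IOException, NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("MD5");
        FileInputStream in = new FileInputStream(path);
        try {
          byte[] buf = new byte[8192];
          int n;
          while ((n = in.read(buf)) != -1) {
            md.update(buf, 0, n);
          }
        } finally {
          in.close();
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
          hex.append(String.format("%02x", b));
        }
        return hex.toString();
      }

      static void verify(String path, String expected) throws Exception {
        String actual = md5Of(path);
        if (!actual.equals(expected)) {
          throw new IOException("Image file " + path
              + " is corrupt with MD5 checksum of " + actual
              + " but expecting " + expected);
        }
      }
    }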




Hadoop-Hdfs-trunk - Build # 551 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/551/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 646141 lines...]
    [junit] 2011-01-13 13:38:19,127 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-13 13:38:19,128 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-13 13:38:19,128 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 39737
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 39737: exiting
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-13 13:38:19,230 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:45424, storageID=DS-1888505084-127.0.1.1-45424-1294925888090, infoPort=47456, ipcPort=39737):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-13 13:38:19,230 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 39737
    [junit] 2011-01-13 13:38:19,230 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:45424, storageID=DS-1888505084-127.0.1.1-45424-1294925888090, infoPort=47456, ipcPort=39737):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-13 13:38:19,332 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 39737
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-13 13:38:19,332 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-13 13:38:19,333 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-13 13:38:19,333 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-13 13:38:19,435 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2828)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-13 13:38:19,436 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 2 
    [junit] 2011-01-13 13:38:19,435 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 37234
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 37234: exiting
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 37234: exiting
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 37234: exiting
    [junit] 2011-01-13 13:38:19,437 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 37234: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.941 sec
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 37234: exiting
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37234
    [junit] 2011-01-13 13:38:19,438 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 101 minutes 48 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
REGRESSION:  org.apache.hadoop.cli.TestHDFSCLI.testAll

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testErrorReplicas

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
	at junit.framework.TestCase.runBare(TestCase.java:132)
	at junit.framework.TestResult$1.protect(TestResult.java:110)
	at junit.framework.TestResult.runProtected(TestResult.java:128)
	at junit.framework.TestResult.run(TestResult.java:113)
	at junit.framework.TestCase.run(TestCase.java:124)
	at junit.framework.TestSuite.runTest(TestSuite.java:232)
	at junit.framework.TestSuite.run(TestSuite.java:227)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
	... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1592)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
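
[Editor's note] All three recurring TestFileConcurrentReader symptoms in these builds (the locked name1 directory, the exhausted file descriptors, and the failed /bin/ls fork) are consistent with clusters from earlier cases never being torn down. A hedged sketch of the defensive JUnit 3 fixture pattern; the cluster field and builder/shutdown calls mirror the MiniDFSCluster API visible in the traces above, while the surrounding class is illustrative:

    // Sketch of a JUnit 3 fixture that always releases the MiniDFSCluster,
    // so the NameNode storage lock, IPC sockets, and file descriptors
    // cannot leak into the next test case.
    public class TestWithCluster extends junit.framework.TestCase {
      private org.apache.hadoop.hdfs.MiniDFSCluster cluster;

      protected void setUp() throws Exception {
        org.apache.hadoop.conf.Configuration conf =
            new org.apache.hadoop.hdfs.HdfsConfiguration();
        cluster = new org.apache.hadoop.hdfs.MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
      }

      protected void tearDown() throws Exception {
        if (cluster != null) {
          cluster.shutdown();   // releases the storage lock and servers
          cluster = null;
        }
      }
    }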


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqy9(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1a1e36ce754de9bd267aa770afefee09 but expecting cb0fe2de12a839f8f0f484037fc59364

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1a1e36ce754de9bd267aa770afefee09 but expecting cb0fe2de12a839f8f0f484037fc59364
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkw(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 550 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/550/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 583178 lines...]
    [junit] 2011-01-12 12:15:59,265 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7133826699060571543_1007 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir14/blk_7133826699060571543 for deletion
    [junit] 2011-01-12 12:15:59,266 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7133826699060571543_1007 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir14/blk_7133826699060571543
    [junit] 2011-01-12 12:15:59,265 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_1877779036222520603_1071 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir14/blk_1877779036222520603
    [junit] 2011-01-12 12:15:59,266 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7324148003189005245_1083 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir23/blk_7324148003189005245 for deletion
    [junit] 2011-01-12 12:15:59,266 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7438485804283008114_1077 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir5/blk_7438485804283008114 for deletion
    [junit] 2011-01-12 12:15:59,266 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7438485804283008114_1077 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir5/blk_7438485804283008114
    [junit] 2011-01-12 12:15:59,266 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_1981434557925262520_1035 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir50/blk_1981434557925262520
    [junit] 2011-01-12 12:15:59,266 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7456920679024659133_1011 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/blk_7456920679024659133 for deletion
    [junit] 2011-01-12 12:15:59,267 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7462014596100478864_1066 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir60/blk_7462014596100478864 for deletion
    [junit] 2011-01-12 12:15:59,267 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7462014596100478864_1066 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir60/blk_7462014596100478864
    [junit] 2011-01-12 12:15:59,266 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2136646620635202842_1062 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir6/blk_2136646620635202842
    [junit] 2011-01-12 12:15:59,267 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7482973825003126488_1018 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir36/blk_7482973825003126488 for deletion
    [junit] 2011-01-12 12:15:59,267 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7610875128325325348_1092 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir5/blk_7610875128325325348 for deletion
    [junit] 2011-01-12 12:15:59,267 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7610875128325325348_1092 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir5/blk_7610875128325325348
    [junit] 2011-01-12 12:15:59,267 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2229413492508636391_1078 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir19/blk_2229413492508636391
    [junit] 2011-01-12 12:15:59,267 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7636049470277061836_1050 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir61/blk_7636049470277061836 for deletion
    [junit] 2011-01-12 12:15:59,268 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2822990029257396585_1073 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir15/blk_2822990029257396585
    [junit] 2011-01-12 12:15:59,268 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7951012735956913392_1101 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir12/blk_7951012735956913392 for deletion
    [junit] 2011-01-12 12:15:59,268 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2836104691913072772_1064 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir8/blk_2836104691913072772
    [junit] 2011-01-12 12:15:59,268 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7951012735956913392_1101 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir12/blk_7951012735956913392
    [junit] 2011-01-12 12:15:59,268 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2943456956940644827_1011 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/blk_2943456956940644827
    [junit] 2011-01-12 12:15:59,268 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8166646441945823838_1087 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir1/blk_8166646441945823838 for deletion
    [junit] 2011-01-12 12:15:59,268 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3135220031642267099_1062 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir7/blk_3135220031642267099
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8309352820689439398_1027 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir43/blk_8309352820689439398 for deletion
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8166646441945823838_1087 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir1/blk_8166646441945823838
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3600058978918089493_1014 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir33/blk_3600058978918089493
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8499615003522558129_1042 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir41/blk_8499615003522558129 for deletion
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4119722920877628210_1039 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir53/blk_4119722920877628210
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8524709770549578471_1070 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir63/blk_8524709770549578471 for deletion
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8499615003522558129_1042 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir41/blk_8499615003522558129
    [junit] 2011-01-12 12:15:59,269 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4726054166867830355_1002 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/blk_4726054166867830355
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8727612827448774879_1023 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir40/blk_8727612827448774879 for deletion
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8524709770549578471_1070 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir63/blk_8524709770549578471
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4871355857213401433_1079 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir19/blk_4871355857213401433
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8855108646013251016_1092 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir5/blk_8855108646013251016 for deletion
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5020348437279002324_1093 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir13/subdir63/blk_5020348437279002324
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_9099692897962068063_1002 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/blk_9099692897962068063 for deletion
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8855108646013251016_1092 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir9/subdir5/blk_8855108646013251016
    [junit] 2011-01-12 12:15:59,270 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6026962967505898710_1033 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir48/blk_6026962967505898710
    [junit] 2011-01-12 12:15:59,271 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_9099692897962068063_1002 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/blk_9099692897962068063
    [junit] 2011-01-12 12:15:59,271 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6350117763701757876_1071 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir13/blk_6350117763701757876
    [junit] 2011-01-12 12:15:59,271 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6819955796100625225_1043 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir56/blk_6819955796100625225
    [junit] 2011-01-12 12:15:59,271 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7012728499353034015_1004 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir25/blk_7012728499353034015
    [junit] 2011-01-12 12:15:59,271 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7324148003189005245_1083 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir23/blk_7324148003189005245
    [junit] 2011-01-12 12:15:59,271 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7456920679024659133_1011 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/blk_7456920679024659133
    [junit] 2011-01-12 12:15:59,272 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7482973825003126488_1018 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir36/blk_7482973825003126488
    [junit] 2011-01-12 12:15:59,272 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7636049470277061836_1050 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir61/blk_7636049470277061836
    [junit] 2011-01-12 12:15:59,272 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8309352820689439398_1027 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir43/blk_8309352820689439398
    [junit] 2011-01-12 12:15:59,272 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8727612827448774879_1023 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir40/blk_8727612827448774879
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml was length 0

FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of b6d9bcf4512813be570ea10707a3167f but expecting d48cdf06c6cd2eb74243dce3bc46ef05

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of b6d9bcf4512813be570ea10707a3167f but expecting d48cdf06c6cd2eb74243dce3bc46ef05
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 549 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/549/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 643592 lines...]
    [junit] 2011-01-11 12:46:43,100 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2011-01-11 12:46:43,202 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 49339
    [junit] 2011-01-11 12:46:43,202 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 49339: exiting
    [junit] 2011-01-11 12:46:43,202 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-11 12:46:43,203 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:41103, storageID=DS-483673450-127.0.1.1-41103-1294749992200, infoPort=39953, ipcPort=49339):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-11 12:46:43,202 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 49339
    [junit] 2011-01-11 12:46:43,203 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-11 12:46:43,225 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-11 12:46:43,304 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:41103, storageID=DS-483673450-127.0.1.1-41103-1294749992200, infoPort=39953, ipcPort=49339):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-11 12:46:43,304 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 49339
    [junit] 2011-01-11 12:46:43,304 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-11 12:46:43,305 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-11 12:46:43,305 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-11 12:46:43,305 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-11 12:46:43,407 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2828)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-11 12:46:43,407 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-11 12:46:43,407 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 4 
    [junit] 2011-01-11 12:46:43,409 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 56241
    [junit] 2011-01-11 12:46:43,409 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 56241: exiting
    [junit] 2011-01-11 12:46:43,409 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 56241: exiting
    [junit] 2011-01-11 12:46:43,409 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 56241: exiting
    [junit] 2011-01-11 12:46:43,410 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 56241: exiting
    [junit] 2011-01-11 12:46:43,410 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56241
    [junit] 2011-01-11 12:46:43,410 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 56241: exiting
    [junit] 2011-01-11 12:46:43,410 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 56241: exiting
    [junit] 2011-01-11 12:46:43,411 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 56241: exiting
    [junit] 2011-01-11 12:46:43,411 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 56241: exiting
    [junit] 2011-01-11 12:46:43,411 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 56241: exiting
    [junit] 2011-01-11 12:46:43,411 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 56241: exiting
    [junit] 2011-01-11 12:46:43,411 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.495 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:733: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:493: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:674: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:648: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:716: Tests failed!

Total time: 68 minutes 44 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  org.apache.hadoop.hdfs.server.datanode.TestFiDataTransferProtocol2.pipeline_Fi_35

Error Message:
Forked Java VM exited abnormally. Please note the time in the report does not reflect the time until the VM exit.

Stack Trace:
junit.framework.AssertionFailedError: Forked Java VM exited abnormally. Please note the time in the report does not reflect the time until the VM exit.


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6851f4b6e59639b59783e65ae30ec564 but expecting 5ff6b5ff346420bcf6baebada241c360

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6851f4b6e59639b59783e65ae30ec564 but expecting 5ff6b5ff346420bcf6baebada241c360
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tkm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 548 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/548/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 486448 lines...]
    [junit] 2011-01-10 12:03:47,966 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:50089
    [junit] 2011-01-10 12:03:47,967 INFO  namenode.NameNode (NameNode.java:run(523)) - NameNode Web-server up at: localhost/127.0.0.1:50089
    [junit] 2011-01-10 12:03:47,968 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 36943: starting
    [junit] 2011-01-10 12:03:47,968 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-10 12:03:47,968 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 36943: starting
    [junit] 2011-01-10 12:03:47,969 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 36943: starting
    [junit] 2011-01-10 12:03:47,969 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 36943: starting
    [junit] 2011-01-10 12:03:47,969 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 3 on 36943: starting
    [junit] 2011-01-10 12:03:47,969 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 4 on 36943: starting
    [junit] 2011-01-10 12:03:47,970 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 5 on 36943: starting
    [junit] 2011-01-10 12:03:47,970 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 6 on 36943: starting
    [junit] 2011-01-10 12:03:47,970 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 7 on 36943: starting
    [junit] 2011-01-10 12:03:47,970 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 9 on 36943: starting
    [junit] 2011-01-10 12:03:47,970 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 8 on 36943: starting
    [junit] 2011-01-10 12:03:47,971 INFO  namenode.NameNode (NameNode.java:initialize(390)) - NameNode up at: localhost/127.0.0.1:36943
    [junit] Starting DataNode 0 with dfs.datanode.data.dir: file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/,file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/
    [junit] 2011-01-10 12:03:48,145 INFO  datanode.DataNode (DataNode.java:initDataXceiver(472)) - Opened info server at 51231
    [junit] 2011-01-10 12:03:48,149 INFO  datanode.DataNode (DataXceiverServer.java:<init>(77)) - Balancing bandwith is 1048576 bytes/s
    [junit] 2011-01-10 12:03:48,155 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1 is not formatted.
    [junit] 2011-01-10 12:03:48,156 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-10 12:03:48,159 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2 is not formatted.
    [junit] 2011-01-10 12:03:48,159 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-10 12:03:48,213 INFO  datanode.DataNode (FSDataset.java:registerMBean(1772)) - Registered FSDatasetStatusMBean
    [junit] 2011-01-10 12:03:48,221 INFO  datanode.DirectoryScanner (DirectoryScanner.java:<init>(149)) - scan starts at 1294678853221 with interval 21600000
    [junit] 2011-01-10 12:03:48,223 INFO  http.HttpServer (HttpServer.java:addGlobalFilter(409)) - Added global filtersafety (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
    [junit] 2011-01-10 12:03:48,226 INFO  http.HttpServer (HttpServer.java:start(579)) - Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 0
    [junit] 2011-01-10 12:03:48,227 INFO  http.HttpServer (HttpServer.java:start(584)) - listener.getLocalPort() returned 35084 webServer.getConnectors()[0].getLocalPort() returned 35084
    [junit] 2011-01-10 12:03:48,227 INFO  http.HttpServer (HttpServer.java:start(617)) - Jetty bound to port 35084
    [junit] 2011-01-10 12:03:48,228 INFO  mortbay.log (?:invoke0(?)) - jetty-6.1.14
    [junit] 2011-01-10 12:03:48,385 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:35084
    [junit] 2011-01-10 12:03:48,387 INFO  jvm.JvmMetrics (JvmMetrics.java:init(71)) - Cannot initialize JVM Metrics with processName=DataNode, sessionId=null - already initialized
    [junit] 2011-01-10 12:03:48,391 INFO  ipc.Server (Server.java:run(338)) - Starting SocketReader
    [junit] 2011-01-10 12:03:48,392 INFO  metrics.RpcMetrics (RpcMetrics.java:<init>(63)) - Initializing RPC Metrics with hostName=DataNode, port=48972
    [junit] 2011-01-10 12:03:48,392 INFO  metrics.RpcDetailedMetrics (RpcDetailedMetrics.java:<init>(57)) - Initializing RPC Metrics with hostName=DataNode, port=48972
    [junit] 2011-01-10 12:03:48,394 INFO  datanode.DataNode (DataNode.java:initIpcServer(432)) - dnRegistration = DatanodeRegistration(h9.grid.sp2.yahoo.net:51231, storageID=, infoPort=35084, ipcPort=48972)
    [junit] 2011-01-10 12:03:48,399 INFO  hdfs.StateChange (FSNamesystem.java:registerDatanode(2514)) - BLOCK* NameSystem.registerDatanode: node registration from 127.0.0.1:51231 storage DS-261089149-127.0.1.1-51231-1294661028397
    [junit] 2011-01-10 12:03:48,406 INFO  net.NetworkTopology (NetworkTopology.java:add(331)) - Adding a new node: /default-rack/127.0.0.1:51231
    [junit] 2011-01-10 12:03:48,411 INFO  datanode.DataNode (DataNode.java:register(714)) - New storage id DS-261089149-127.0.1.1-51231-1294661028397 is assigned to data-node 127.0.0.1:51231
    [junit] 2011-01-10 12:03:48,412 INFO  datanode.DataNode (DataNode.java:run(1438)) - DatanodeRegistration(127.0.0.1:51231, storageID=DS-261089149-127.0.1.1-51231-1294661028397, infoPort=35084, ipcPort=48972)In DataNode.run, data = FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-10 12:03:48,413 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-10 12:03:48,413 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 48972: starting
    [junit] 2011-01-10 12:03:48,413 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 48972: starting
    [junit] 2011-01-10 12:03:48,413 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 48972: starting
    [junit] 2011-01-10 12:03:48,414 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 48972: starting
    [junit] 2011-01-10 12:03:48,414 INFO  datanode.DataNode (DataNode.java:offerService(904)) - using BLOCKREPORT_INTERVAL of 21600000msec Initial delay: 0msec
    [junit] 2011-01-10 12:03:48,426 INFO  datanode.DataNode (DataNode.java:blockReport(1143)) - BlockReport of 0 blocks got processed in 8 msecs
    [junit] 2011-01-10 12:03:48,426 INFO  datanode.DataNode (DataNode.java:offerService(946)) - Starting Periodic block scanner.
    [junit] 2011-01-10 12:03:48,502 INFO  FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson	ip=/127.0.0.1	cmd=create	src=/testWriteConf.xml	dst=null	perm=hudson:supergroup:rw-r--r--
    [junit] Tests run: 1, Failures: 0, Errors: 1, Time elapsed: 60.073 sec
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqxn(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


FAILED:  org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:485)
	at org.apache.hadoop.hdfs.DFSOutputStream.waitAndQueueCurrentPacket(DFSOutputStream.java:1169)
	at org.apache.hadoop.hdfs.DFSOutputStream.writeChunk(DFSOutputStream.java:1228)
	at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunk(FSOutputSummer.java:161)
	at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:104)
	at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:90)
	at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:54)
	at java.io.DataOutputStream.write(DataOutputStream.java:90)
	at sun.nio.cs.StreamEncoder.writeBytes(StreamEncoder.java:202)
	at sun.nio.cs.StreamEncoder.implWrite(StreamEncoder.java:263)
	at sun.nio.cs.StreamEncoder.write(StreamEncoder.java:106)
	at java.io.OutputStreamWriter.write(OutputStreamWriter.java:190)
	at com.sun.org.apache.xml.internal.serializer.ToStream.characters(ToStream.java:1499)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:789)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:323)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:240)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:132)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:94)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transformIdentity(TransformerImpl.java:662)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:708)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:313)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1608)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1559)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.__CLR3_0_28n7kbs1103(TestWriteConfigurationToDFS.java:46)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf(TestWriteConfigurationToDFS.java:33)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 8ae344dba36f40d8329436e071f0f82a but expecting a3f08ee8cfb2a2902201330479fdd343

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 8ae344dba36f40d8329436e071f0f82a but expecting a3f08ee8cfb2a2902201330479fdd343
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tka(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 547 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/547/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 473004 lines...]
    [junit] 2011-01-09 12:04:09,262 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:47240
    [junit] 2011-01-09 12:04:09,262 INFO  namenode.NameNode (NameNode.java:run(523)) - NameNode Web-server up at: localhost/127.0.0.1:47240
    [junit] 2011-01-09 12:04:09,263 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-09 12:04:09,263 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 44994: starting
    [junit] 2011-01-09 12:04:09,264 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 44994: starting
    [junit] 2011-01-09 12:04:09,264 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 44994: starting
    [junit] 2011-01-09 12:04:09,264 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 44994: starting
    [junit] 2011-01-09 12:04:09,264 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 4 on 44994: starting
    [junit] 2011-01-09 12:04:09,264 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 3 on 44994: starting
    [junit] 2011-01-09 12:04:09,264 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 5 on 44994: starting
    [junit] 2011-01-09 12:04:09,265 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 6 on 44994: starting
    [junit] 2011-01-09 12:04:09,265 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 8 on 44994: starting
    [junit] 2011-01-09 12:04:09,265 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 7 on 44994: starting
    [junit] 2011-01-09 12:04:09,265 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 9 on 44994: starting
    [junit] 2011-01-09 12:04:09,266 INFO  namenode.NameNode (NameNode.java:initialize(390)) - NameNode up at: localhost/127.0.0.1:44994
    [junit] Starting DataNode 0 with dfs.datanode.data.dir: file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/,file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/
    [junit] 2011-01-09 12:04:09,434 INFO  datanode.DataNode (DataNode.java:initDataXceiver(472)) - Opened info server at 54360
    [junit] 2011-01-09 12:04:09,438 INFO  datanode.DataNode (DataXceiverServer.java:<init>(77)) - Balancing bandwith is 1048576 bytes/s
    [junit] 2011-01-09 12:04:09,444 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1 is not formatted.
    [junit] 2011-01-09 12:04:09,445 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-09 12:04:09,448 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2 is not formatted.
    [junit] 2011-01-09 12:04:09,449 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-09 12:04:09,504 INFO  datanode.DataNode (FSDataset.java:registerMBean(1772)) - Registered FSDatasetStatusMBean
    [junit] 2011-01-09 12:04:09,511 INFO  datanode.DirectoryScanner (DirectoryScanner.java:<init>(149)) - scan starts at 1294585621511 with interval 21600000
    [junit] 2011-01-09 12:04:09,513 INFO  http.HttpServer (HttpServer.java:addGlobalFilter(409)) - Added global filtersafety (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
    [junit] 2011-01-09 12:04:09,517 INFO  http.HttpServer (HttpServer.java:start(579)) - Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 0
    [junit] 2011-01-09 12:04:09,517 INFO  http.HttpServer (HttpServer.java:start(584)) - listener.getLocalPort() returned 44401 webServer.getConnectors()[0].getLocalPort() returned 44401
    [junit] 2011-01-09 12:04:09,518 INFO  http.HttpServer (HttpServer.java:start(617)) - Jetty bound to port 44401
    [junit] 2011-01-09 12:04:09,518 INFO  mortbay.log (?:invoke0(?)) - jetty-6.1.14
    [junit] 2011-01-09 12:04:09,664 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:44401
    [junit] 2011-01-09 12:04:09,666 INFO  jvm.JvmMetrics (JvmMetrics.java:init(71)) - Cannot initialize JVM Metrics with processName=DataNode, sessionId=null - already initialized
    [junit] 2011-01-09 12:04:09,670 INFO  ipc.Server (Server.java:run(338)) - Starting SocketReader
    [junit] 2011-01-09 12:04:09,670 INFO  metrics.RpcMetrics (RpcMetrics.java:<init>(63)) - Initializing RPC Metrics with hostName=DataNode, port=58842
    [junit] 2011-01-09 12:04:09,671 INFO  metrics.RpcDetailedMetrics (RpcDetailedMetrics.java:<init>(57)) - Initializing RPC Metrics with hostName=DataNode, port=58842
    [junit] 2011-01-09 12:04:09,672 INFO  datanode.DataNode (DataNode.java:initIpcServer(432)) - dnRegistration = DatanodeRegistration(h9.grid.sp2.yahoo.net:54360, storageID=, infoPort=44401, ipcPort=58842)
    [junit] 2011-01-09 12:04:09,677 INFO  hdfs.StateChange (FSNamesystem.java:registerDatanode(2514)) - BLOCK* NameSystem.registerDatanode: node registration from 127.0.0.1:54360 storage DS-1090277604-127.0.1.1-54360-1294574649675
    [junit] 2011-01-09 12:04:09,682 INFO  net.NetworkTopology (NetworkTopology.java:add(331)) - Adding a new node: /default-rack/127.0.0.1:54360
    [junit] 2011-01-09 12:04:09,687 INFO  datanode.DataNode (DataNode.java:register(714)) - New storage id DS-1090277604-127.0.1.1-54360-1294574649675 is assigned to data-node 127.0.0.1:54360
    [junit] 2011-01-09 12:04:09,688 INFO  datanode.DataNode (DataNode.java:run(1438)) - DatanodeRegistration(127.0.0.1:54360, storageID=DS-1090277604-127.0.1.1-54360-1294574649675, infoPort=44401, ipcPort=58842)In DataNode.run, data = FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-09 12:04:09,688 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-09 12:04:09,689 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 58842: starting
    [junit] 2011-01-09 12:04:09,689 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 58842: starting
    [junit] 2011-01-09 12:04:09,689 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 58842: starting
    [junit] 2011-01-09 12:04:09,690 INFO  datanode.DataNode (DataNode.java:offerService(904)) - using BLOCKREPORT_INTERVAL of 21600000msec Initial delay: 0msec
    [junit] 2011-01-09 12:04:09,690 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 58842: starting
    [junit] 2011-01-09 12:04:09,702 INFO  datanode.DataNode (DataNode.java:blockReport(1143)) - BlockReport of 0 blocks got processed in 8 msecs
    [junit] 2011-01-09 12:04:09,702 INFO  datanode.DataNode (DataNode.java:offerService(946)) - Starting Periodic block scanner.
    [junit] 2011-01-09 12:04:09,767 INFO  FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson	ip=/127.0.0.1	cmd=create	src=/testWriteConf.xml	dst=null	perm=hudson:supergroup:rw-r--r--
    [junit] Tests run: 1, Failures: 0, Errors: 1, Time elapsed: 60.074 sec
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:485)
	at org.apache.hadoop.hdfs.DFSOutputStream.waitAndQueueCurrentPacket(DFSOutputStream.java:1169)
	at org.apache.hadoop.hdfs.DFSOutputStream.writeChunk(DFSOutputStream.java:1228)
	at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunk(FSOutputSummer.java:161)
	at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:104)
	at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:90)
	at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:54)
	at java.io.DataOutputStream.write(DataOutputStream.java:90)
	at sun.nio.cs.StreamEncoder.writeBytes(StreamEncoder.java:202)
	at sun.nio.cs.StreamEncoder.implWrite(StreamEncoder.java:263)
	at sun.nio.cs.StreamEncoder.write(StreamEncoder.java:106)
	at java.io.OutputStreamWriter.write(OutputStreamWriter.java:190)
	at com.sun.org.apache.xml.internal.serializer.ToStream.characters(ToStream.java:1499)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:789)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:323)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:240)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:132)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:94)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transformIdentity(TransformerImpl.java:662)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:708)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:313)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1608)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1559)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.__CLR3_0_28n7kbs1103(TestWriteConfigurationToDFS.java:46)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf(TestWriteConfigurationToDFS.java:33)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6ae5155343687c34c267ad438a53a1d4 but expecting 560cb7ae846968145f1a6b1ec1f902df

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6ae5155343687c34c267ad438a53a1d4 but expecting 560cb7ae846968145f1a6b1ec1f902df
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tka(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 546 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/546/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 463989 lines...]
    [junit] 2011-01-08 12:04:54,920 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:51129
    [junit] 2011-01-08 12:04:54,920 INFO  namenode.NameNode (NameNode.java:run(523)) - NameNode Web-server up at: localhost/127.0.0.1:51129
    [junit] 2011-01-08 12:04:54,921 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-08 12:04:54,921 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 40548: starting
    [junit] 2011-01-08 12:04:54,922 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 40548: starting
    [junit] 2011-01-08 12:04:54,922 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 3 on 40548: starting
    [junit] 2011-01-08 12:04:54,922 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 40548: starting
    [junit] 2011-01-08 12:04:54,922 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 40548: starting
    [junit] 2011-01-08 12:04:54,923 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 4 on 40548: starting
    [junit] 2011-01-08 12:04:54,923 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 5 on 40548: starting
    [junit] 2011-01-08 12:04:54,923 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 6 on 40548: starting
    [junit] 2011-01-08 12:04:54,924 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 7 on 40548: starting
    [junit] 2011-01-08 12:04:54,924 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 8 on 40548: starting
    [junit] 2011-01-08 12:04:54,924 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 9 on 40548: starting
    [junit] 2011-01-08 12:04:54,924 INFO  namenode.NameNode (NameNode.java:initialize(390)) - NameNode up at: localhost/127.0.0.1:40548
    [junit] Starting DataNode 0 with dfs.datanode.data.dir: file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/,file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/
    [junit] 2011-01-08 12:04:55,079 INFO  datanode.DataNode (DataNode.java:initDataXceiver(472)) - Opened info server at 50470
    [junit] 2011-01-08 12:04:55,083 INFO  datanode.DataNode (DataXceiverServer.java:<init>(77)) - Balancing bandwith is 1048576 bytes/s
    [junit] 2011-01-08 12:04:55,090 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1 is not formatted.
    [junit] 2011-01-08 12:04:55,090 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-08 12:04:55,094 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2 is not formatted.
    [junit] 2011-01-08 12:04:55,094 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-08 12:04:55,147 INFO  datanode.DataNode (FSDataset.java:registerMBean(1772)) - Registered FSDatasetStatusMBean
    [junit] 2011-01-08 12:04:55,155 INFO  datanode.DirectoryScanner (DirectoryScanner.java:<init>(149)) - scan starts at 1294498367155 with interval 21600000
    [junit] 2011-01-08 12:04:55,157 INFO  http.HttpServer (HttpServer.java:addGlobalFilter(409)) - Added global filtersafety (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
    [junit] 2011-01-08 12:04:55,160 INFO  http.HttpServer (HttpServer.java:start(579)) - Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 0
    [junit] 2011-01-08 12:04:55,160 INFO  http.HttpServer (HttpServer.java:start(584)) - listener.getLocalPort() returned 38806 webServer.getConnectors()[0].getLocalPort() returned 38806
    [junit] 2011-01-08 12:04:55,161 INFO  http.HttpServer (HttpServer.java:start(617)) - Jetty bound to port 38806
    [junit] 2011-01-08 12:04:55,161 INFO  mortbay.log (?:invoke0(?)) - jetty-6.1.14
    [junit] 2011-01-08 12:04:55,319 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:38806
    [junit] 2011-01-08 12:04:55,321 INFO  jvm.JvmMetrics (JvmMetrics.java:init(71)) - Cannot initialize JVM Metrics with processName=DataNode, sessionId=null - already initialized
    [junit] 2011-01-08 12:04:55,326 INFO  ipc.Server (Server.java:run(338)) - Starting SocketReader
    [junit] 2011-01-08 12:04:55,326 INFO  metrics.RpcMetrics (RpcMetrics.java:<init>(63)) - Initializing RPC Metrics with hostName=DataNode, port=48609
    [junit] 2011-01-08 12:04:55,327 INFO  metrics.RpcDetailedMetrics (RpcDetailedMetrics.java:<init>(57)) - Initializing RPC Metrics with hostName=DataNode, port=48609
    [junit] 2011-01-08 12:04:55,328 INFO  datanode.DataNode (DataNode.java:initIpcServer(432)) - dnRegistration = DatanodeRegistration(h9.grid.sp2.yahoo.net:50470, storageID=, infoPort=38806, ipcPort=48609)
    [junit] 2011-01-08 12:04:55,333 INFO  hdfs.StateChange (FSNamesystem.java:registerDatanode(2514)) - BLOCK* NameSystem.registerDatanode: node registration from 127.0.0.1:50470 storage DS-1758152017-127.0.1.1-50470-1294488295332
    [junit] 2011-01-08 12:04:55,340 INFO  net.NetworkTopology (NetworkTopology.java:add(331)) - Adding a new node: /default-rack/127.0.0.1:50470
    [junit] 2011-01-08 12:04:55,345 INFO  datanode.DataNode (DataNode.java:register(714)) - New storage id DS-1758152017-127.0.1.1-50470-1294488295332 is assigned to data-node 127.0.0.1:50470
    [junit] 2011-01-08 12:04:55,346 INFO  datanode.DataNode (DataNode.java:run(1438)) - DatanodeRegistration(127.0.0.1:50470, storageID=DS-1758152017-127.0.1.1-50470-1294488295332, infoPort=38806, ipcPort=48609)In DataNode.run, data = FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-08 12:04:55,346 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-08 12:04:55,347 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 48609: starting
    [junit] 2011-01-08 12:04:55,347 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 48609: starting
    [junit] 2011-01-08 12:04:55,347 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 48609: starting
    [junit] 2011-01-08 12:04:55,348 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 48609: starting
    [junit] 2011-01-08 12:04:55,348 INFO  datanode.DataNode (DataNode.java:offerService(904)) - using BLOCKREPORT_INTERVAL of 21600000msec Initial delay: 0msec
    [junit] 2011-01-08 12:04:55,360 INFO  datanode.DataNode (DataNode.java:blockReport(1143)) - BlockReport of 0 blocks got processed in 8 msecs
    [junit] 2011-01-08 12:04:55,360 INFO  datanode.DataNode (DataNode.java:offerService(946)) - Starting Periodic block scanner.
    [junit] 2011-01-08 12:04:55,435 INFO  FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson	ip=/127.0.0.1	cmd=create	src=/testWriteConf.xml	dst=null	perm=hudson:supergroup:rw-r--r--
    [junit] Tests run: 1, Failures: 0, Errors: 1, Time elapsed: 60.074 sec
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqxn(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files  at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)  at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)  at org.apache.hadoop.util.Shell.run(Shell.java:188)  at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)  at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)  at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)  at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)  at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)  at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)  at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)  at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1594)  at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)  at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)  at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)  at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)  at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)  at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)  at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)  at junit.framework.TestCase.runBare(TestCase.java:132)  at junit.framework.TestResult$1.protect(TestResult.java:110)  at junit.framework.TestResult.runProtected(TestResult.java:128)  at junit.framework.TestResult.run(TestResult.java:113)  at junit.framework.TestCase.run(TestCase.java:124)  at junit.framework.TestSuite.runTest(TestSuite.java:232)  at junit.framework.TestSuite.run(TestSuite.java:227)  at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)  at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)  at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768) Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files  at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)  at java.lang.ProcessImpl.start(ProcessImpl.java:65)  at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)  ... 34 more 

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1594)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1594)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1572)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:485)
	at org.apache.hadoop.hdfs.DFSOutputStream.waitAndQueueCurrentPacket(DFSOutputStream.java:1169)
	at org.apache.hadoop.hdfs.DFSOutputStream.writeChunk(DFSOutputStream.java:1228)
	at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunk(FSOutputSummer.java:161)
	at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:104)
	at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:90)
	at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:54)
	at java.io.DataOutputStream.write(DataOutputStream.java:90)
	at sun.nio.cs.StreamEncoder.writeBytes(StreamEncoder.java:202)
	at sun.nio.cs.StreamEncoder.implWrite(StreamEncoder.java:263)
	at sun.nio.cs.StreamEncoder.write(StreamEncoder.java:106)
	at java.io.OutputStreamWriter.write(OutputStreamWriter.java:190)
	at com.sun.org.apache.xml.internal.serializer.ToStream.characters(ToStream.java:1499)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:789)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:323)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:240)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:132)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:94)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transformIdentity(TransformerImpl.java:662)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:708)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:313)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1608)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1559)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.__CLR3_0_28n7kbs1103(TestWriteConfigurationToDFS.java:46)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf(TestWriteConfigurationToDFS.java:33)
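
For context, the test that times out here does little more than serialize a Configuration into a freshly created DFS file; the wait is inside the write pipeline (DFSOutputStream.waitAndQueueCurrentPacket), not in the XML serializer. A sketch of the operation under test, assuming the default filesystem points at a running cluster (the path matches the audit log of a later build below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteConfSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream os = fs.create(new Path("/testWriteConf.xml"));
        try {
          // The frame from the trace:
          // Configuration.writeXml(Configuration.java:1559)
          conf.writeXml(os);
        } finally {
          // In the trace the stall is already inside writeChunk: the data
          // queue is full and nothing is draining it, so the write blocks
          // until the 60 s test timeout fires.
          os.close();
        }
      }
    }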


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of a5ff67bedc4155cdb986ff24b0bc922a but expecting ed2520fe516bc29595e3e9e159e68de8

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of a5ff67bedc4155cdb986ff24b0bc922a but expecting ed2520fe516bc29595e3e9e159e68de8
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tka(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
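
The fsimage failure above is the checkpoint-time integrity check: the secondary reads the image back and compares its MD5 against the digest recorded when the file was saved. A generic sketch of that kind of verification (the method name and layout are illustrative, not FSImage's code):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;

    public final class Md5CheckSketch {
      /** Throws if the file's MD5 digest differs from the expected hex string. */
      public static void verify(String path, String expectedHex) throws Exception {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        InputStream in = new DigestInputStream(new FileInputStream(path), md5);
        try {
          byte[] buf = new byte[64 * 1024];
          while (in.read(buf) != -1) {
            // reading the stream is what feeds the digest
          }
        } finally {
          in.close();
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md5.digest()) {
          hex.append(String.format("%02x", b));  // unsigned two-digit hex
        }
        if (!hex.toString().equals(expectedHex)) {
          throw new IOException("Image file " + path
              + " is corrupt with MD5 checksum of " + hex
              + " but expecting " + expectedHex);
        }
      }
    }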




Hadoop-Hdfs-trunk - Build # 545 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/545/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 472645 lines...]
    [junit] 2011-01-07 12:05:09,339 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:50216
    [junit] 2011-01-07 12:05:09,339 INFO  namenode.NameNode (NameNode.java:run(523)) - NameNode Web-server up at: localhost/127.0.0.1:50216
    [junit] 2011-01-07 12:05:09,340 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-07 12:05:09,340 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 53551: starting
    [junit] 2011-01-07 12:05:09,341 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 53551: starting
    [junit] 2011-01-07 12:05:09,341 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 53551: starting
    [junit] 2011-01-07 12:05:09,341 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 53551: starting
    [junit] 2011-01-07 12:05:09,342 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 3 on 53551: starting
    [junit] 2011-01-07 12:05:09,342 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 4 on 53551: starting
    [junit] 2011-01-07 12:05:09,342 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 5 on 53551: starting
    [junit] 2011-01-07 12:05:09,343 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 6 on 53551: starting
    [junit] 2011-01-07 12:05:09,343 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 7 on 53551: starting
    [junit] 2011-01-07 12:05:09,343 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 8 on 53551: starting
    [junit] 2011-01-07 12:05:09,343 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 9 on 53551: starting
    [junit] 2011-01-07 12:05:09,344 INFO  namenode.NameNode (NameNode.java:initialize(390)) - NameNode up at: localhost/127.0.0.1:53551
    [junit] Starting DataNode 0 with dfs.datanode.data.dir: file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/,file:/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/
    [junit] 2011-01-07 12:05:09,509 INFO  datanode.DataNode (DataNode.java:initDataXceiver(472)) - Opened info server at 33701
    [junit] 2011-01-07 12:05:09,513 INFO  datanode.DataNode (DataXceiverServer.java:<init>(77)) - Balancing bandwith is 1048576 bytes/s
    [junit] 2011-01-07 12:05:09,520 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1 is not formatted.
    [junit] 2011-01-07 12:05:09,520 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-07 12:05:09,523 INFO  common.Storage (DataStorage.java:recoverTransitionRead(127)) - Storage directory /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2 is not formatted.
    [junit] 2011-01-07 12:05:09,524 INFO  common.Storage (DataStorage.java:recoverTransitionRead(128)) - Formatting ...
    [junit] 2011-01-07 12:05:09,576 INFO  datanode.DataNode (FSDataset.java:registerMBean(1772)) - Registered FSDatasetStatusMBean
    [junit] 2011-01-07 12:05:09,584 INFO  datanode.DirectoryScanner (DirectoryScanner.java:<init>(149)) - scan starts at 1294422877584 with interval 21600000
    [junit] 2011-01-07 12:05:09,586 INFO  http.HttpServer (HttpServer.java:addGlobalFilter(409)) - Added global filtersafety (class=org.apache.hadoop.http.HttpServer$QuotingInputFilter)
    [junit] 2011-01-07 12:05:09,589 INFO  http.HttpServer (HttpServer.java:start(579)) - Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 0
    [junit] 2011-01-07 12:05:09,590 INFO  http.HttpServer (HttpServer.java:start(584)) - listener.getLocalPort() returned 44426 webServer.getConnectors()[0].getLocalPort() returned 44426
    [junit] 2011-01-07 12:05:09,590 INFO  http.HttpServer (HttpServer.java:start(617)) - Jetty bound to port 44426
    [junit] 2011-01-07 12:05:09,590 INFO  mortbay.log (?:invoke0(?)) - jetty-6.1.14
    [junit] 2011-01-07 12:05:09,738 INFO  mortbay.log (?:invoke0(?)) - Started SelectChannelConnector@localhost:44426
    [junit] 2011-01-07 12:05:09,740 INFO  jvm.JvmMetrics (JvmMetrics.java:init(71)) - Cannot initialize JVM Metrics with processName=DataNode, sessionId=null - already initialized
    [junit] 2011-01-07 12:05:09,744 INFO  ipc.Server (Server.java:run(338)) - Starting SocketReader
    [junit] 2011-01-07 12:05:09,744 INFO  metrics.RpcMetrics (RpcMetrics.java:<init>(63)) - Initializing RPC Metrics with hostName=DataNode, port=58103
    [junit] 2011-01-07 12:05:09,745 INFO  metrics.RpcDetailedMetrics (RpcDetailedMetrics.java:<init>(57)) - Initializing RPC Metrics with hostName=DataNode, port=58103
    [junit] 2011-01-07 12:05:09,753 INFO  datanode.DataNode (DataNode.java:initIpcServer(432)) - dnRegistration = DatanodeRegistration(h9.grid.sp2.yahoo.net:33701, storageID=, infoPort=44426, ipcPort=58103)
    [junit] 2011-01-07 12:05:09,759 INFO  hdfs.StateChange (FSNamesystem.java:registerDatanode(2514)) - BLOCK* NameSystem.registerDatanode: node registration from 127.0.0.1:33701 storage DS-2082047178-127.0.1.1-33701-1294401909757
    [junit] 2011-01-07 12:05:09,765 INFO  net.NetworkTopology (NetworkTopology.java:add(331)) - Adding a new node: /default-rack/127.0.0.1:33701
    [junit] 2011-01-07 12:05:09,769 INFO  datanode.DataNode (DataNode.java:register(714)) - New storage id DS-2082047178-127.0.1.1-33701-1294401909757 is assigned to data-node 127.0.0.1:33701
    [junit] 2011-01-07 12:05:09,770 INFO  datanode.DataNode (DataNode.java:run(1438)) - DatanodeRegistration(127.0.0.1:33701, storageID=DS-2082047178-127.0.1.1-33701-1294401909757, infoPort=44426, ipcPort=58103)In DataNode.run, data = FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-07 12:05:09,771 INFO  ipc.Server (Server.java:run(608)) - IPC Server Responder: starting
    [junit] 2011-01-07 12:05:09,771 INFO  ipc.Server (Server.java:run(443)) - IPC Server listener on 58103: starting
    [junit] 2011-01-07 12:05:09,771 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 0 on 58103: starting
    [junit] 2011-01-07 12:05:09,772 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 1 on 58103: starting
    [junit] 2011-01-07 12:05:09,772 INFO  ipc.Server (Server.java:run(1369)) - IPC Server handler 2 on 58103: starting
    [junit] 2011-01-07 12:05:09,772 INFO  datanode.DataNode (DataNode.java:offerService(904)) - using BLOCKREPORT_INTERVAL of 21600000msec Initial delay: 0msec
    [junit] 2011-01-07 12:05:09,784 INFO  datanode.DataNode (DataNode.java:blockReport(1143)) - BlockReport of 0 blocks got processed in 8 msecs
    [junit] 2011-01-07 12:05:09,784 INFO  datanode.DataNode (DataNode.java:offerService(946)) - Starting Periodic block scanner.
    [junit] 2011-01-07 12:05:09,857 INFO  FSNamesystem.audit (FSNamesystem.java:logAuditEvent(148)) - ugi=hudson	ip=/127.0.0.1	cmd=create	src=/testWriteConf.xml	dst=null	perm=hudson:supergroup:rw-r--r--
    [junit] Tests run: 1, Failures: 0, Errors: 1, Time elapsed: 60.062 sec
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot run program "du": java.io.IOException: error=24, Too many open files

Stack Trace:
java.io.IOException: Cannot run program "du": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
	at org.apache.hadoop.util.Shell.run(Shell.java:188)
	at org.apache.hadoop.fs.DU.<init>(DU.java:57)
	at org.apache.hadoop.fs.DU.<init>(DU.java:67)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset$FSVolume.<init>(FSDataset.java:342)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.<init>(FSDataset.java:873)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initFsDataSet(DataNode.java:400)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:505)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
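
What actually fails here is Hadoop's disk-usage probe: FSVolume's constructor (the DU.<init> frames) shells out to the platform "du" command, and forking a child needs free descriptors for its pipes, so an fd-exhausted JVM dies in ProcessBuilder.start() before "du" ever runs. A stripped-down sketch of such a probe (the flag and output parsing are assumptions, not copied from DU.java):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public final class DuProbeSketch {
      /** Disk usage of a directory in kilobytes, via "du -sk". */
      public static long usedKb(String dir) throws IOException, InterruptedException {
        // ProcessBuilder.start() is the frame that throws
        // "error=24, Too many open files" in the report above.
        Process p = new ProcessBuilder("du", "-sk", dir).start();
        BufferedReader out =
            new BufferedReader(new InputStreamReader(p.getInputStream()));
        try {
          String line = out.readLine();  // e.g. "12345\t/path"
          if (p.waitFor() != 0 || line == null) {
            throw new IOException("du failed for " + dir);
          }
          return Long.parseLong(line.split("\\s+")[0]);
        } finally {
          out.close();
        }
      }
    }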


FAILED:  org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:485)
	at org.apache.hadoop.hdfs.DFSOutputStream.waitAndQueueCurrentPacket(DFSOutputStream.java:1169)
	at org.apache.hadoop.hdfs.DFSOutputStream.writeChunk(DFSOutputStream.java:1228)
	at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunk(FSOutputSummer.java:161)
	at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:104)
	at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:90)
	at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:54)
	at java.io.DataOutputStream.write(DataOutputStream.java:90)
	at sun.nio.cs.StreamEncoder.writeBytes(StreamEncoder.java:202)
	at sun.nio.cs.StreamEncoder.implWrite(StreamEncoder.java:263)
	at sun.nio.cs.StreamEncoder.write(StreamEncoder.java:106)
	at java.io.OutputStreamWriter.write(OutputStreamWriter.java:190)
	at com.sun.org.apache.xml.internal.serializer.ToStream.characters(ToStream.java:1499)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:789)
	at com.sun.org.apache.xml.internal.serializer.ToUnknownStream.characters(ToUnknownStream.java:323)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:240)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:226)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:132)
	at com.sun.org.apache.xalan.internal.xsltc.trax.DOM2TO.parse(DOM2TO.java:94)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transformIdentity(TransformerImpl.java:662)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:708)
	at com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:313)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1608)
	at org.apache.hadoop.conf.Configuration.writeXml(Configuration.java:1559)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.__CLR3_0_28n7kbs1103(TestWriteConfigurationToDFS.java:46)
	at org.apache.hadoop.hdfs.TestWriteConfigurationToDFS.testWriteConf(TestWriteConfigurationToDFS.java:33)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 530fc00dbe01d164bde9cfa80d9be7a8 but expecting 45bf02671e0987a350184f34f4fd9881

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 530fc00dbe01d164bde9cfa80d9be7a8 but expecting 45bf02671e0987a350184f34f4fd9881
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tka(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 544 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/544/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 700694 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2011-01-06 13:39:56,170 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-06 13:39:56,201 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-06 13:39:56,271 INFO  datanode.DataNode (DataNode.java:run(1459)) - DatanodeRegistration(127.0.0.1:51576, storageID=DS-1507657681-127.0.1.1-51576-1294321185165, infoPort=49309, ipcPort=35875):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-06 13:39:56,271 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 35875
    [junit] 2011-01-06 13:39:56,271 INFO  datanode.DataNode (DataNode.java:shutdown(785)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-06 13:39:56,271 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-06 13:39:56,272 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-06 13:39:56,272 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-06 13:39:56,374 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-06 13:39:56,374 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-06 13:39:56,375 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2011-01-06 13:39:56,376 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 50277
    [junit] 2011-01-06 13:39:56,376 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 50277: exiting
    [junit] 2011-01-06 13:39:56,377 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 50277
    [junit] 2011-01-06 13:39:56,377 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-06 13:39:56,378 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 50277: exiting
    [junit] 2011-01-06 13:39:56,378 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 50277: exiting
    [junit] 2011-01-06 13:39:56,377 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 50277: exiting
    [junit] 2011-01-06 13:39:56,377 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 50277: exiting
    [junit] 2011-01-06 13:39:56,377 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 50277: exiting
    [junit] 2011-01-06 13:39:56,378 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 50277: exiting
    [junit] 2011-01-06 13:39:56,378 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 50277: exiting
    [junit] 2011-01-06 13:39:56,378 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 50277: exiting
    [junit] 2011-01-06 13:39:56,378 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 50277: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.414 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 66 minutes 1 second
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 0304810f4c0afa204ae5e6750b5cae17 but expecting 55219b4da98f831156cc4167aa775884

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 0304810f4c0afa204ae5e6750b5cae17 but expecting 55219b4da98f831156cc4167aa775884
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tk6(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 543 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/543/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 9 lines...]
	at hudson.model.AbstractBuild$AbstractRunner.checkout(AbstractBuild.java:479)
	at hudson.model.AbstractBuild$AbstractRunner.run(AbstractBuild.java:411)
	at hudson.model.Run.run(Run.java:1324)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:46)
	at hudson.model.ResourceController.execute(ResourceController.java:88)
	at hudson.model.Executor.run(Executor.java:139)
Caused by: java.io.IOException: Remote call on hadoop8 failed
	at hudson.remoting.Channel.call(Channel.java:639)
	at hudson.FilePath.act(FilePath.java:742)
	... 10 more
Caused by: java.lang.NoClassDefFoundError: java/net/SocketTimeoutException
	at org.tmatesoft.svn.core.internal.io.dav.http.HTTPConnection.request(HTTPConnection.java:380)
	at org.tmatesoft.svn.core.internal.io.dav.http.HTTPConnection.request(HTTPConnection.java:275)
	at org.tmatesoft.svn.core.internal.io.dav.http.HTTPConnection.request(HTTPConnection.java:263)
	at org.tmatesoft.svn.core.internal.io.dav.DAVConnection.doPropfind(DAVConnection.java:126)
	at org.tmatesoft.svn.core.internal.io.dav.DAVUtil.getProperties(DAVUtil.java:73)
	at org.tmatesoft.svn.core.internal.io.dav.DAVUtil.getResourceProperties(DAVUtil.java:79)
	at org.tmatesoft.svn.core.internal.io.dav.DAVUtil.getPropertyValue(DAVUtil.java:93)
	at org.tmatesoft.svn.core.internal.io.dav.DAVUtil.getBaselineProperties(DAVUtil.java:245)
	at org.tmatesoft.svn.core.internal.io.dav.DAVUtil.getBaselineInfo(DAVUtil.java:184)
	at org.tmatesoft.svn.core.internal.io.dav.DAVRepository.getLatestRevision(DAVRepository.java:182)
	at org.tmatesoft.svn.core.wc.SVNBasicClient.getRevisionNumber(SVNBasicClient.java:482)
	at org.tmatesoft.svn.core.wc.SVNBasicClient.getLocations(SVNBasicClient.java:873)
	at org.tmatesoft.svn.core.wc.SVNBasicClient.createRepository(SVNBasicClient.java:534)
	at org.tmatesoft.svn.core.wc.SVNUpdateClient.doCheckout(SVNUpdateClient.java:901)
	at hudson.scm.SubversionSCM$CheckOutTask.invoke(SubversionSCM.java:678)
	at hudson.scm.SubversionSCM$CheckOutTask.invoke(SubversionSCM.java:596)
	at hudson.FilePath$FileCallableWrapper.call(FilePath.java:1899)
	at hudson.remoting.UserRequest.perform(UserRequest.java:114)
	at hudson.remoting.UserRequest.perform(UserRequest.java:48)
	at hudson.remoting.Request$2.run(Request.java:270)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
	at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
	at java.util.concurrent.FutureTask.run(FutureTask.java:138)
	at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
	at java.lang.Thread.run(Thread.java:619)
Caused by: java.lang.ClassNotFoundException: Classloading from system classloader disabled
	at hudson.remoting.RemoteClassLoader$ClassLoaderProxy.fetch2(RemoteClassLoader.java:399)
	at sun.reflect.GeneratedMethodAccessor103.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at hudson.remoting.RemoteInvocationHandler$RPCRequest.perform(RemoteInvocationHandler.java:274)
	at hudson.remoting.Request$2.run(Request.java:270)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441)
	at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
	at java.util.concurrent.FutureTask.run(FutureTask.java:138)
	at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
	at java.lang.Thread.run(Thread.java:662)
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 542 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/542/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 671188 lines...]
    [junit] 2011-01-04 12:41:10,203 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-04 12:41:10,204 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2011-01-04 12:41:10,306 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 44700
    [junit] 2011-01-04 12:41:10,306 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 44700: exiting
    [junit] 2011-01-04 12:41:10,306 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 44700
    [junit] 2011-01-04 12:41:10,306 INFO  datanode.DataNode (DataNode.java:shutdown(780)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-04 12:41:10,307 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:36094, storageID=DS-942104791-127.0.1.1-36094-1294144859241, infoPort=45027, ipcPort=44700):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-04 12:41:10,306 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-04 12:41:10,309 INFO  datanode.DataNode (DataNode.java:shutdown(780)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-04 12:41:10,410 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-04 12:41:10,410 INFO  datanode.DataNode (DataNode.java:run(1454)) - DatanodeRegistration(127.0.0.1:36094, storageID=DS-942104791-127.0.1.1-36094-1294144859241, infoPort=45027, ipcPort=44700):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-04 12:41:10,410 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 44700
    [junit] 2011-01-04 12:41:10,410 INFO  datanode.DataNode (DataNode.java:shutdown(780)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-04 12:41:10,410 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-04 12:41:10,411 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-04 12:41:10,411 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-04 12:41:10,513 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-04 12:41:10,513 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-04 12:41:10,514 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2 
    [junit] 2011-01-04 12:41:10,515 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 37612
    [junit] 2011-01-04 12:41:10,515 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 37612: exiting
    [junit] 2011-01-04 12:41:10,516 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 37612: exiting
    [junit] 2011-01-04 12:41:10,516 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 37612: exiting
    [junit] 2011-01-04 12:41:10,516 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 37612: exiting
    [junit] 2011-01-04 12:41:10,516 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-04 12:41:10,516 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37612
    [junit] 2011-01-04 12:41:10,517 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 37612: exiting
    [junit] 2011-01-04 12:41:10,517 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 37612: exiting
    [junit] 2011-01-04 12:41:10,517 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 37612: exiting
    [junit] 2011-01-04 12:41:10,516 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 37612: exiting
    [junit] 2011-01-04 12:41:10,517 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 37612: exiting
    [junit] 2011-01-04 12:41:10,517 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 37612: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.698 sec

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 68 minutes 38 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Was waiting too long for a replica to become TEMPORARY

Stack Trace:
junit.framework.AssertionFailedError: Was waiting too long for a replica to become TEMPORARY
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.waitForTempReplica(TestBlockReport.java:514)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jqib(TestBlockReport.java:408)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)
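
"Was waiting too long" is the give-up branch of a bounded polling wait: the test repeatedly inspects replica state and aborts after a deadline, so a slow or overloaded build slave can trip it without any HDFS bug. A generic sketch of that wait pattern (names and timings are illustrative; the real loop is waitForTempReplica at TestBlockReport.java:514):

    import junit.framework.AssertionFailedError;

    public final class PollUntilSketch {
      public interface Condition { boolean holds(); }

      /** Polls until the condition holds, failing once the deadline passes. */
      public static void await(Condition c, long timeoutMs, String what)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!c.holds()) {
          if (System.currentTimeMillis() > deadline) {
            // The failure mode above: the expected state never showed up.
            throw new AssertionFailedError("Was waiting too long for " + what);
          }
          Thread.sleep(100);  // poll interval is a judgment call in such tests
        }
      }
    }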


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Listener.<init>(Server.java:318)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1502)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1570)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1513)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1480)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
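
By this point the run has degenerated into a cascade of "Too many open files": even Selector.open for the DataNode's IPC server fails, since it too needs a fresh descriptor. A quick way to see how close a JVM is to its limit on Linux is to count the entries under /proc/self/fd, which is a kernel convention rather than anything Hadoop-specific:

    import java.io.File;

    public final class OpenFdCountSketch {
      /** Number of file descriptors currently open in this JVM (Linux only). */
      public static int count() {
        String[] fds = new File("/proc/self/fd").list();
        // Each entry is one open descriptor: files, sockets, pipes, and
        // epoll fds like the one Selector.open() tries to create above.
        return fds == null ? -1 : fds.length;
      }

      public static void main(String[] args) {
        System.out.println("open fds: " + count());
      }
    }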


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 512716301cb546f6df34f06631de3ac8 but expecting c5e897690bd058884bdc4bb628a5b1af

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 512716301cb546f6df34f06631de3ac8 but expecting c5e897690bd058884bdc4bb628a5b1af
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjw(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 541 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/541/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 680690 lines...]
    [junit] 2011-01-03 12:39:52,300 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-03 12:39:52,300 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2011-01-03 12:39:52,402 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 54394
    [junit] 2011-01-03 12:39:52,403 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 54394: exiting
    [junit] 2011-01-03 12:39:52,404 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-01-03 12:39:52,404 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 54394
    [junit] 2011-01-03 12:39:52,404 INFO  datanode.DataNode (DataNode.java:shutdown(780)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-01-03 12:39:52,404 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:51305, storageID=DS-483458649-127.0.1.1-51305-1294058381583, infoPort=35823, ipcPort=54394):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-03 12:39:52,407 INFO  datanode.DataNode (DataNode.java:shutdown(780)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-03 12:39:52,507 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-01-03 12:39:52,508 INFO  datanode.DataNode (DataNode.java:run(1454)) - DatanodeRegistration(127.0.0.1:51305, storageID=DS-483458649-127.0.1.1-51305-1294058381583, infoPort=35823, ipcPort=54394):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-01-03 12:39:52,508 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 54394
    [junit] 2011-01-03 12:39:52,508 INFO  datanode.DataNode (DataNode.java:shutdown(780)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-01-03 12:39:52,508 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-01-03 12:39:52,508 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-01-03 12:39:52,509 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-01-03 12:39:52,511 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-03 12:39:52,511 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2011-01-03 12:39:52,511 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-01-03 12:39:52,513 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 53690
    [junit] 2011-01-03 12:39:52,513 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 53690: exiting
    [junit] 2011-01-03 12:39:52,513 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53690
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 53690: exiting
    [junit] 2011-01-03 12:39:52,513 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 53690: exiting
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 53690: exiting
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 53690: exiting
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 53690: exiting
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 53690: exiting
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 53690: exiting
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 53690: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.218 sec
    [junit] 2011-01-03 12:39:52,514 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 53690: exiting
    [junit] 2011-01-03 12:39:52,513 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 67 minutes 29 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182qis(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/VERSION (Too many open files)

Stack Trace:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/VERSION (Too many open files)
	at java.io.RandomAccessFile.open(Native Method)
	at java.io.RandomAccessFile.<init>(RandomAccessFile.java:212)
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.write(Storage.java:265)
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.write(Storage.java:259)
	at org.apache.hadoop.hdfs.server.common.Storage.writeAll(Storage.java:800)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.register(DataNode.java:708)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.runDatanodeDaemon(DataNode.java:1464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:644)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:162)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:170)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:430)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:162)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:170)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:430)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 462b27937407135e43f15ebf6ead87b9 but expecting b659cdd70d61db55bd72d7758e870761

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 462b27937407135e43f15ebf6ead87b9 but expecting b659cdd70d61db55bd72d7758e870761
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjs(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 540 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/540/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 576817 lines...]
    [junit] 2011-01-02 12:19:20,006 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3094852499616718116_1016 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir55/blk_3094852499616718116
    [junit] 2011-01-02 12:19:20,007 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3336629607501324374_1025 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir62/blk_3336629607501324374 for deletion
    [junit] 2011-01-02 12:19:20,007 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3503705794993884282_1054 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir20/blk_3503705794993884282 for deletion
    [junit] 2011-01-02 12:19:20,007 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3336629607501324374_1025 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir62/blk_3336629607501324374
    [junit] 2011-01-02 12:19:20,007 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3548177645357240189_1076 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir63/blk_3548177645357240189 for deletion
    [junit] 2011-01-02 12:19:20,007 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3503705794993884282_1054 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir20/blk_3503705794993884282
    [junit] 2011-01-02 12:19:20,007 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3966046540798531510_1020 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir58/blk_3966046540798531510 for deletion
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_4197480359425474997_1062 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir27/blk_4197480359425474997 for deletion
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3966046540798531510_1020 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir58/blk_3966046540798531510
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_4696538338555801224_1075 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir37/blk_4696538338555801224 for deletion
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4197480359425474997_1062 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir27/blk_4197480359425474997
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_4718036001403563385_1024 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir22/blk_4718036001403563385 for deletion
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4696538338555801224_1075 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir37/blk_4696538338555801224
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5081289600839753379_1012 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir12/blk_5081289600839753379 for deletion
    [junit] 2011-01-02 12:19:20,008 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5367648145032749599_1067 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir30/blk_5367648145032749599 for deletion
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5738451030434618002_1072 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir34/blk_5738451030434618002 for deletion
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5367648145032749599_1067 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir30/blk_5367648145032749599
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6440001306474029667_1083 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir43/blk_6440001306474029667 for deletion
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5738451030434618002_1072 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir34/blk_5738451030434618002
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6470256019722691967_1037 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir7/blk_6470256019722691967 for deletion
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6440001306474029667_1083 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir43/blk_6440001306474029667
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6761865580464095817_1087 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir25/subdir30/blk_6761865580464095817 for deletion
    [junit] 2011-01-02 12:19:20,009 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6470256019722691967_1037 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir7/blk_6470256019722691967
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2360802547683085934_1092 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir15/subdir51/blk_2360802547683085934
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_2815973936211071386_1099 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir15/subdir57/blk_2815973936211071386
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3038731458209648549_1039 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir34/blk_3038731458209648549
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3171650745887232791_1037 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir32/blk_3171650745887232791
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3548177645357240189_1076 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir63/blk_3548177645357240189
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4718036001403563385_1024 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir22/blk_4718036001403563385
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5081289600839753379_1012 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir12/blk_5081289600839753379
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7233600710066476825_1093 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir25/subdir35/blk_7233600710066476825 for deletion
    [junit] 2011-01-02 12:19:20,010 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6761865580464095817_1087 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir25/subdir30/blk_6761865580464095817
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7443073406982219354_1027 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir63/blk_7443073406982219354 for deletion
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7453620816421832489_1065 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir29/blk_7453620816421832489 for deletion
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7233600710066476825_1093 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir25/subdir35/blk_7233600710066476825
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7963924760258434478_1079 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir1/blk_7963924760258434478 for deletion
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8024732697827363067_1035 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir5/blk_8024732697827363067 for deletion
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7443073406982219354_1027 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir63/blk_7443073406982219354
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7963924760258434478_1079 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir1/blk_7963924760258434478
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8079335518524662835_1036 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir7/blk_8079335518524662835 for deletion
    [junit] 2011-01-02 12:19:20,011 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7453620816421832489_1065 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir29/blk_7453620816421832489
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8894149069458605294_1020 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir19/blk_8894149069458605294 for deletion
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8896519329804598263_1012 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir12/blk_8896519329804598263 for deletion
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8024732697827363067_1035 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir5/blk_8024732697827363067
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8894149069458605294_1020 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir19/blk_8894149069458605294
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8896519329804598263_1012 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir12/blk_8896519329804598263
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8994263464979675409_1023 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir60/blk_8994263464979675409 for deletion
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8079335518524662835_1036 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir7/blk_8079335518524662835
    [junit] 2011-01-02 12:19:20,012 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8994263464979675409_1023 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir60/blk_8994263464979675409
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
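
This one dies before the test proper begins: opening the epoll selector for yet another MiniDFSCluster pushes the JVM past its file-descriptor limit, the usual symptom of earlier cases leaking their clusters within the same test JVM. A minimal JUnit-style sketch (hypothetical test class; only the guaranteed shutdown matters) of the lifecycle that keeps descriptors bounded:

    import junit.framework.TestCase;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Hypothetical test skeleton: the point is the guaranteed shutdown,
    // which releases the cluster's sockets, selectors, and open files.
    public class ClusterLifecycleTest extends TestCase {
        private MiniDFSCluster cluster;

        @Override
        protected void setUp() throws Exception {
            Configuration conf = new Configuration();
            cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
            cluster.waitActive();
        }

        @Override
        protected void tearDown() throws Exception {
            if (cluster != null) {
                cluster.shutdown();   // runs even when the test body throws
                cluster = null;
            }
        }

        public void testSomething() throws Exception {
            // test body would go here
        }
    }

With the shutdown in tearDown rather than at the end of the test body, the descriptors come back even when an assertion fails mid-test.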


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
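
The two "Cannot lock storage" failures that follow are likely the knock-on effect of the descriptor-exhausted case above: its half-built cluster was never torn down, so the name1 directory still holds its in_use.lock and the next format attempt cannot take the exclusive lock. The mechanism is ordinary java.nio file locking; a minimal sketch of the pattern (hypothetical paths; not the Storage class itself):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    public class StorageLockSketch {
        public static void main(String[] args) throws IOException {
            File lockFile = new File(args[0], "in_use.lock");
            RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
            // tryLock returns null when another process holds the lock;
            // within the same JVM an OverlappingFileLockException is
            // thrown instead, which amounts to the same verdict here.
            FileLock lock = raf.getChannel().tryLock();
            if (lock == null) {
                raf.close();
                throw new IOException("Cannot lock storage " + args[0]
                    + ". The directory is already locked.");
            }
            // ... use the storage directory, then release:
            lock.release();
            raf.close();
        }
    }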


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml was length 0

FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 89f85db8b7e3b318497446c2d02bb0e9 but expecting 56fc07ed5239e3a80dd07be0723ed94c

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 89f85db8b7e3b318497446c2d02bb0e9 but expecting 56fc07ed5239e3a80dd07be0723ed94c
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 539 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/539/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 375110 lines...]
    [junit] 2011-01-01 13:31:37,273 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:40,273 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:40,274 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:40,274 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:40,274 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:40,274 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:40,274 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:40,274 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:40,275 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:40,275 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:40,275 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:40,275 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:40,275 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:43,276 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:43,276 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:43,276 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:43,276 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:43,277 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:43,277 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:43,277 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:43,277 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:43,277 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:43,278 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:43,278 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:43,278 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:46,279 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:46,279 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:46,279 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:46,279 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:46,279 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:46,279 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:46,280 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:46,280 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:46,280 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:46,280 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:46,280 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:46,281 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:49,281 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:49,281 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:49,282 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:49,282 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:49,282 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:49,282 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:49,282 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:49,282 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:49,283 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:49,283 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
    [junit] 2011-01-01 13:31:49,283 DEBUG namenode.FSNamesystem (BlockPlacementPolicyDefault.java:isGoodTarget(413)) - Node /default-rack/127.0.0.1:47342 is not chosen because the node is too busy
    [junit] 2011-01-01 13:31:49,283 WARN  namenode.FSNamesystem (BlockPlacementPolicyDefault.java:chooseTarget(190)) - Not able to place enough replicas, still in need of 1
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend3.testAppendToPartialChunk

Error Message:
Lease mismatch on /partialChunk/foo owned by HDFS_NameNode but is accessed by DFSClient_NONMAPREDUCE_1664622238_1  at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:1673)  at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:1648)  at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:1703)  at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:1686)  at org.apache.hadoop.hdfs.server.namenode.NameNode.complete(NameNode.java:853)  at sun.reflect.GeneratedMethodAccessor8.invoke(Unknown Source)  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)  at java.lang.reflect.Method.invoke(Method.java:597)  at org.apache.hadoop.ipc.WritableRpcEngine$Server.call(WritableRpcEngine.java:351)  at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1399)  at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1395)  at java.security.AccessController.doPrivileged(Native Method)  at javax.security.auth.Subject.doAs(Subject.java:396)  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)  at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1393) 

Stack Trace:
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException: Lease mismatch on /partialChunk/foo owned by HDFS_NameNode but is accessed by DFSClient_NONMAPREDUCE_1664622238_1
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:1673)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:1648)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:1703)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:1686)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.complete(NameNode.java:853)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.call(WritableRpcEngine.java:351)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1399)
	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1395)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1393)

	at org.apache.hadoop.ipc.Client.call(Client.java:1028)
	at org.apache.hadoop.ipc.WritableRpcEngine$Invoker.invoke(WritableRpcEngine.java:198)
	at $Proxy7.complete(Unknown Source)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:84)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
	at $Proxy7.complete(Unknown Source)
	at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:1502)
	at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:1489)
	at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:66)
	at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:91)
	at org.apache.hadoop.hdfs.TestFileAppend3.__CLR3_0_2i84krqr3r(TestFileAppend3.java:353)
	at org.apache.hadoop.hdfs.TestFileAppend3.testAppendToPartialChunk(TestFileAppend3.java:311)
	at junit.extensions.TestDecorator.basicRun(TestDecorator.java:24)
	at junit.extensions.TestSetup$1.protect(TestSetup.java:23)
	at junit.extensions.TestSetup.run(TestSetup.java:27)
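
The lease mismatch message records the whole story: by the time the client's retried complete() arrives, the file's lease is held by HDFS_NameNode, the holder name the name-node uses when it recovers a lease itself, so the original writer is no longer allowed to finish the file. The check is a holder comparison; a self-contained sketch with hypothetical stand-ins for the per-file lease record:

    import java.io.IOException;

    public class LeaseCheckSketch {

        // Hypothetical stand-in for the per-file lease record consulted
        // by the name-node before completing a file.
        static String leaseHolderOf(String path) {
            return "HDFS_NameNode"; // lease already reassigned by recovery
        }

        static void checkLease(String path, String client) throws IOException {
            String holder = leaseHolderOf(path);
            if (!holder.equals(client)) {
                throw new IOException("Lease mismatch on " + path
                    + " owned by " + holder + " but is accessed by " + client);
            }
        }

        public static void main(String[] args) throws IOException {
            checkLease("/partialChunk/foo", "DFSClient_NONMAPREDUCE_1664622238_1");
        }
    }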


FAILED:  TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml was length 0

FAILED:  org.apache.hadoop.hdfs.TestDFSStorageStateRecovery.testDNStorageStates

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 69d1774e121411506c39a356a8edb105 but expecting 2cae312c103ccdb1032bc8bc19376950

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 69d1774e121411506c39a356a8edb105 but expecting 2cae312c103ccdb1032bc8bc19376950
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 538 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/538/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 635629 lines...]
    [junit] 2010-12-31 15:09:15,148 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-31 15:09:15,149 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2010-12-31 15:09:15,255 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 45374
    [junit] 2010-12-31 15:09:15,256 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 45374: exiting
    [junit] 2010-12-31 15:09:15,256 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-31 15:09:15,256 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-31 15:09:15,257 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 45374
    [junit] 2010-12-31 15:09:15,257 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:51515, storageID=DS-1757069465-127.0.1.1-51515-1293808136567, infoPort=48408, ipcPort=45374):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-31 15:09:15,258 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-31 15:09:15,325 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-31 15:09:15,359 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:51515, storageID=DS-1757069465-127.0.1.1-51515-1293808136567, infoPort=48408, ipcPort=45374):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-31 15:09:15,359 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 45374
    [junit] 2010-12-31 15:09:15,359 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-31 15:09:15,360 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-31 15:09:15,360 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-31 15:09:15,360 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-31 15:09:15,463 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-31 15:09:15,463 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 56 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 5 3 
    [junit] 2010-12-31 15:09:15,463 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-31 15:09:15,464 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 59114
    [junit] 2010-12-31 15:09:15,465 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59114: exiting
    [junit] 2010-12-31 15:09:15,465 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-31 15:09:15,465 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59114
    [junit] 2010-12-31 15:09:15,466 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 59114: exiting
    [junit] 2010-12-31 15:09:15,466 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 59114: exiting
    [junit] 2010-12-31 15:09:15,466 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 59114: exiting
    [junit] 2010-12-31 15:09:15,467 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 59114: exiting
    [junit] 2010-12-31 15:09:15,467 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 59114: exiting
    [junit] 2010-12-31 15:09:15,467 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 59114: exiting
    [junit] 2010-12-31 15:09:15,466 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 59114: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 72.309 sec
    [junit] 2010-12-31 15:09:15,466 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 59114: exiting
    [junit] 2010-12-31 15:09:15,466 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 59114: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 211 minutes 50 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestDFSStorageStateRecovery.testDNStorageStates

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.server.common.TestDistributedUpgrade.testDistributedUpgrade

Error Message:
    Data-node missed a distributed upgrade and will shutdown.    Upgrade object for DATA_NODE layout version -24. Name-node version = -27.

Stack Trace:
java.io.IOException: 
   Data-node missed a distributed upgrade and will shutdown.
   Upgrade object for DATA_NODE layout version -24. Name-node version = -27.
	at org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode.preUpgradeAction(UpgradeObjectDatanode.java:96)
	at org.apache.hadoop.hdfs.server.datanode.UpgradeManagerDatanode.initializeUpgrade(UpgradeManagerDatanode.java:60)
	at org.apache.hadoop.hdfs.server.datanode.DataStorage.verifyDistributedUpgradeProgress(DataStorage.java:472)
	at org.apache.hadoop.hdfs.server.datanode.DataStorage.doTransition(DataStorage.java:246)
	at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:152)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initFsDataSet(DataNode.java:391)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:500)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.server.common.TestDistributedUpgrade.__CLR3_0_2lxfdoypw9(TestDistributedUpgrade.java:131)
	at org.apache.hadoop.hdfs.server.common.TestDistributedUpgrade.testDistributedUpgrade(TestDistributedUpgrade.java:91)
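
HDFS layout versions are negative integers that decrease as the on-disk format evolves, so the name-node's -27 here is newer than the -24 that the data-node's registered upgrade object targets; the data-node concludes it skipped an intermediate distributed upgrade and refuses to start. A loose sketch of that guard, using the values from the failure above (illustrative only; the real check lives in UpgradeObjectDatanode.preUpgradeAction):

    import java.io.IOException;

    public class LayoutVersionGuard {
        public static void main(String[] args) throws IOException {
            // Layout versions are negative and DECREASE over time,
            // so -27 is newer than -24.
            int upgradeObjectVersion = -24; // what the data-node can upgrade to
            int nameNodeVersion = -27;      // what the cluster is running
            if (nameNodeVersion < upgradeObjectVersion) {
                throw new IOException(
                    "\n   Data-node missed a distributed upgrade and will shutdown."
                  + "\n   Upgrade object for DATA_NODE layout version "
                  + upgradeObjectVersion + ". Name-node version = "
                  + nameNodeVersion + ".");
            }
        }
    }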


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jqi1(TestBlockReport.java:414)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.largeDelete

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 68779d756a63b7ccd7f785f1f110e5f9 but expecting 559afa204aaeff0aabfc1fffba8ea99a

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 68779d756a63b7ccd7f785f1f110e5f9 but expecting 559afa204aaeff0aabfc1fffba8ea99a
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 537 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/537/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 356894 lines...]
    [junit] 2010-12-30 12:18:36,367 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(940)) - BLOCK* ask 127.0.0.1:52713 to replicate blk_-4129008170576099953_1012 to datanode(s) 127.0.0.1:58956
    [junit] 2010-12-30 12:18:36,367 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(940)) - BLOCK* ask 127.0.0.1:40396 to replicate blk_-965578181222415619_1011 to datanode(s) 127.0.0.1:58956
    [junit] 2010-12-30 12:18:36,367 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(940)) - BLOCK* ask 127.0.0.1:40396 to replicate blk_1739696111893890944_1015 to datanode(s) 127.0.0.1:58956
    [junit] 2010-12-30 12:18:38,302 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:40396, storageID=DS-425380035-127.0.1.1-40396-1293710912062, infoPort=47813, ipcPort=46191) Starting thread to transfer block blk_-965578181222415619_1011 to 127.0.0.1:58956 
    [junit] 2010-12-30 12:18:38,375 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:40396, storageID=DS-425380035-127.0.1.1-40396-1293710912062, infoPort=47813, ipcPort=46191) Starting thread to transfer block blk_1739696111893890944_1015 to 127.0.0.1:58956 
    [junit] 2010-12-30 12:18:38,376 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:40396, storageID=DS-425380035-127.0.1.1-40396-1293710912062, infoPort=47813, ipcPort=46191):Failed to transfer blk_-965578181222415619_1011 to 127.0.0.1:58956 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-30 12:18:38,420 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:40396, storageID=DS-425380035-127.0.1.1-40396-1293710912062, infoPort=47813, ipcPort=46191):Failed to transfer blk_1739696111893890944_1015 to 127.0.0.1:58956 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-30 12:18:38,862 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:52713, storageID=DS-1948680484-127.0.1.1-52713-1293710912502, infoPort=33618, ipcPort=46347) Starting thread to transfer block blk_-5233455000682746642_1013 to 127.0.0.1:58956 
    [junit] 2010-12-30 12:18:38,896 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:52713, storageID=DS-1948680484-127.0.1.1-52713-1293710912502, infoPort=33618, ipcPort=46347):Failed to transfer blk_-5233455000682746642_1013 to 127.0.0.1:58956 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-30 12:18:38,922 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:52713, storageID=DS-1948680484-127.0.1.1-52713-1293710912502, infoPort=33618, ipcPort=46347) Starting thread to transfer block blk_-4129008170576099953_1012 to 127.0.0.1:58956 
    [junit] 2010-12-30 12:18:39,035 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:52713, storageID=DS-1948680484-127.0.1.1-52713-1293710912502, infoPort=33618, ipcPort=46347):Failed to transfer blk_-4129008170576099953_1012 to 127.0.0.1:58956 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-30 12:18:39,368 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(940)) - BLOCK* ask 127.0.0.1:52713 to replicate blk_4550489181632327041_1016 to datanode(s) 127.0.0.1:58956
    [junit] 2010-12-30 12:18:41,863 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:52713, storageID=DS-1948680484-127.0.1.1-52713-1293710912502, infoPort=33618, ipcPort=46347) Starting thread to transfer block blk_4550489181632327041_1016 to 127.0.0.1:58956 
    [junit] 2010-12-30 12:18:41,940 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:52713, storageID=DS-1948680484-127.0.1.1-52713-1293710912502, infoPort=33618, ipcPort=46347):Failed to transfer blk_4550489181632327041_1016 to 127.0.0.1:58956 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml was length 0

FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of e3f4e1ce5337476620e3a2b82c0a4427 but expecting ffd635e70ab2831ebf61350145598ede

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of e3f4e1ce5337476620e3a2b82c0a4427 but expecting ffd635e70ab2831ebf61350145598ede
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
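
The failure above is the checkpoint image failing an MD5 comparison while being loaded. For context, a minimal self-contained Java sketch of this style of check (illustrative only, not the FSImage code; the helper name and signature are made up): digest the file while streaming it, then compare against the checksum recorded when the image was written.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;

    public class ImageChecksumCheck {
        // Hypothetical helper, not the HDFS API: verify a file against the
        // MD5 recorded at checkpoint time, in the spirit of the check that
        // FSImage.loadFSImage reports above. A mismatch means the bytes on
        // disk differ from the bytes that were written.
        public static void verify(String path, byte[] expectedMd5)
                throws IOException, java.security.NoSuchAlgorithmException {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            try (DigestInputStream in =
                    new DigestInputStream(new FileInputStream(path), md5)) {
                byte[] buf = new byte[8192];
                while (in.read(buf) != -1) {
                    // reading through the stream updates the digest
                }
            }
            if (!MessageDigest.isEqual(md5.digest(), expectedMd5)) {
                throw new IOException("Image file " + path + " is corrupt");
            }
        }
    }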




Hadoop-Hdfs-trunk - Build # 536 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/536/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 372860 lines...]
    [junit] 
    [junit] 2010-12-29 12:12:02,319 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:57988, storageID=DS-717331953-127.0.1.1-57988-1293624116104, infoPort=49764, ipcPort=52381):Failed to transfer blk_7925586834405086383_1018 to 127.0.0.1:47190 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-29 12:12:03,654 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(940)) - BLOCK* ask 127.0.0.1:57988 to replicate blk_8760831669921389990_1015 to datanode(s) 127.0.0.1:47190
    [junit] 2010-12-29 12:12:05,318 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:57988, storageID=DS-717331953-127.0.1.1-57988-1293624116104, infoPort=49764, ipcPort=52381) Starting thread to transfer block blk_8760831669921389990_1015 to 127.0.0.1:47190 
    [junit] 2010-12-29 12:12:05,319 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:57988, storageID=DS-717331953-127.0.1.1-57988-1293624116104, infoPort=49764, ipcPort=52381):Failed to transfer blk_8760831669921389990_1015 to 127.0.0.1:47190 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-29 12:13:14,638 DEBUG datanode.DataNode (BlockSender.java:<init>(142)) - block=blk_7559600000448764565_1011, replica=FinalizedReplica, blk_7559600000448764565_1011, FINALIZED
    [junit]   getNumBytes()     = 56
    [junit]   getBytesOnDisk()  = 56
    [junit]   getVisibleLength()= 56
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/blk_7559600000448764565
    [junit]   unlinked=false
    [junit] 2010-12-29 12:13:14,638 DEBUG datanode.DataNode (BlockSender.java:<init>(237)) - replica=FinalizedReplica, blk_7559600000448764565_1011, FINALIZED
    [junit]   getNumBytes()     = 56
    [junit]   getBytesOnDisk()  = 56
    [junit]   getVisibleLength()= 56
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/blk_7559600000448764565
    [junit]   unlinked=false
    [junit] 2010-12-29 12:13:14,639 INFO  datanode.DataBlockScanner (DataBlockScanner.java:verifyBlock(447)) - Verification succeeded for blk_7559600000448764565_1011
    [junit] 2010-12-29 12:15:45,212 DEBUG datanode.DataNode (BlockSender.java:<init>(142)) - block=blk_-8418286807444487879_1013, replica=FinalizedReplica, blk_-8418286807444487879_1013, FINALIZED
    [junit]   getNumBytes()     = 58
    [junit]   getBytesOnDisk()  = 58
    [junit]   getVisibleLength()= 58
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data5/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data5/current/finalized/blk_-8418286807444487879
    [junit]   unlinked=false
    [junit] 2010-12-29 12:15:45,212 DEBUG datanode.DataNode (BlockSender.java:<init>(237)) - replica=FinalizedReplica, blk_-8418286807444487879_1013, FINALIZED
    [junit]   getNumBytes()     = 58
    [junit]   getBytesOnDisk()  = 58
    [junit]   getVisibleLength()= 58
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data5/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data5/current/finalized/blk_-8418286807444487879
    [junit]   unlinked=false
    [junit] 2010-12-29 12:15:45,212 INFO  datanode.DataBlockScanner (DataBlockScanner.java:verifyBlock(447)) - Verification succeeded for blk_-8418286807444487879_1013
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2wjxr3fqx5(TestFileConcurrentReader.java:290)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite(TestFileConcurrentReader.java:289)
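
This regression fails while MiniDFSCluster is constructing an RPC server, whose Responder calls Selector.open() and therefore needs fresh file descriptors. A minimal repro sketch (assumptions: Linux epoll selectors, and that descriptors leaked by earlier tests are the real culprit) of how unclosed selectors exhaust the per-process fd limit:

    import java.io.IOException;
    import java.nio.channels.Selector;
    import java.util.ArrayList;
    import java.util.List;

    public class SelectorLeakRepro {
        public static void main(String[] args) {
            // Each open selector pins an epoll fd (plus wakeup-pipe fds) on
            // Linux; never closing them, as a leaking test might, eventually
            // makes the next Selector.open() fail with "Too many open files".
            List<Selector> leaked = new ArrayList<Selector>();
            try {
                while (true) {
                    leaked.add(Selector.open());
                }
            } catch (IOException e) {
                System.err.println("fd limit hit after " + leaked.size()
                        + " selectors: " + e.getMessage());
            }
        }
    }

Raising ulimit -n on the build slave would only postpone this kind of failure; the leaked selectors and sockets need to be closed.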


FAILED:  TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml was length 0

FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of a876945f514913ca1f80d72ae36db306 but expecting 6a7f4d9791b1d62b94f957999fd15f8e

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of a876945f514913ca1f80d72ae36db306 but expecting 6a7f4d9791b1d62b94f957999fd15f8e
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 535 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/535/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 679319 lines...]
    [junit] 2010-12-28 12:48:05,996 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-28 12:48:05,996 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2010-12-28 12:48:06,108 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40529
    [junit] 2010-12-28 12:48:06,108 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 40529: exiting
    [junit] 2010-12-28 12:48:06,109 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-28 12:48:06,109 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-28 12:48:06,109 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 40529
    [junit] 2010-12-28 12:48:06,109 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:50922, storageID=DS-440938063-127.0.1.1-50922-1293540475177, infoPort=52032, ipcPort=40529):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-28 12:48:06,111 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-28 12:48:06,212 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-28 12:48:06,212 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:50922, storageID=DS-440938063-127.0.1.1-50922-1293540475177, infoPort=52032, ipcPort=40529):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-28 12:48:06,212 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40529
    [junit] 2010-12-28 12:48:06,213 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-28 12:48:06,213 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-28 12:48:06,213 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-28 12:48:06,214 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-28 12:48:06,316 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-28 12:48:06,316 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 3 
    [junit] 2010-12-28 12:48:06,316 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-28 12:48:06,318 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 44595
    [junit] 2010-12-28 12:48:06,318 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 44595: exiting
    [junit] 2010-12-28 12:48:06,319 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-28 12:48:06,319 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 44595: exiting
    [junit] 2010-12-28 12:48:06,318 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 44595
    [junit] 2010-12-28 12:48:06,320 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 44595: exiting
    [junit] 2010-12-28 12:48:06,319 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 44595: exiting
    [junit] 2010-12-28 12:48:06,319 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 44595: exiting
    [junit] 2010-12-28 12:48:06,319 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 44595: exiting
    [junit] 2010-12-28 12:48:06,319 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 44595: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.562 sec
    [junit] 2010-12-28 12:48:06,319 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 44595: exiting
    [junit] 2010-12-28 12:48:06,320 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 44595: exiting
    [junit] 2010-12-28 12:48:06,320 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 44595: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 71 minutes 46 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6216f4f522c871e24d8b4075ff054629 but expecting 001ab656f61a086f93e434d405c31eaa

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6216f4f522c871e24d8b4075ff054629 but expecting 001ab656f61a086f93e434d405c31eaa
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjm(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 534 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/534/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 686196 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-27 12:35:38,986 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-27 12:35:39,088 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-27 12:35:39,088 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:39405, storageID=DS-1831335352-127.0.1.1-39405-1293453328172, infoPort=34638, ipcPort=40911):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-27 12:35:39,088 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40911
    [junit] 2010-12-27 12:35:39,088 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-27 12:35:39,089 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-27 12:35:39,089 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-27 12:35:39,089 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-27 12:35:39,191 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-27 12:35:39,192 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4 
    [junit] 2010-12-27 12:35:39,191 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-27 12:35:39,193 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 43074
    [junit] 2010-12-27 12:35:39,194 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 43074: exiting
    [junit] 2010-12-27 12:35:39,194 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 43074: exiting
    [junit] 2010-12-27 12:35:39,194 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 43074
    [junit] 2010-12-27 12:35:39,194 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 43074: exiting
    [junit] 2010-12-27 12:35:39,194 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 43074: exiting
    [junit] 2010-12-27 12:35:39,194 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-27 12:35:39,195 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 43074: exiting
    [junit] 2010-12-27 12:35:39,195 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 43074: exiting
    [junit] 2010-12-27 12:35:39,195 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 43074: exiting
    [junit] 2010-12-27 12:35:39,196 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 43074: exiting
    [junit] 2010-12-27 12:35:39,196 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 43074: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.442 sec
    [junit] 2010-12-27 12:35:39,196 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 43074: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 59 minutes 24 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1c92cdfc786a62164b4a67cc0cd44d7a but expecting 306cb793a3a5ceea89a8cf0789d246b9

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1c92cdfc786a62164b4a67cc0cd44d7a but expecting 306cb793a3a5ceea89a8cf0789d246b9
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 533 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/533/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 677674 lines...]
    [junit] 2010-12-26 12:35:19,899 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-26 12:35:19,900 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-26 12:35:19,900 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2010-12-26 12:35:20,002 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 33642
    [junit] 2010-12-26 12:35:20,003 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 33642: exiting
    [junit] 2010-12-26 12:35:20,003 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 33642
    [junit] 2010-12-26 12:35:20,003 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:48152, storageID=DS-804703225-127.0.1.1-48152-1293366909143, infoPort=51872, ipcPort=33642):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-26 12:35:20,003 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-26 12:35:20,004 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-26 12:35:20,104 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-26 12:35:20,105 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:48152, storageID=DS-804703225-127.0.1.1-48152-1293366909143, infoPort=51872, ipcPort=33642):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-26 12:35:20,105 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 33642
    [junit] 2010-12-26 12:35:20,105 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-26 12:35:20,105 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-26 12:35:20,106 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-26 12:35:20,106 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-26 12:35:20,108 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-26 12:35:20,108 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-26 12:35:20,109 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 2 
    [junit] 2010-12-26 12:35:20,110 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 59031
    [junit] 2010-12-26 12:35:20,110 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59031: exiting
    [junit] 2010-12-26 12:35:20,110 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 59031: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 35.946 sec
    [junit] 2010-12-26 12:35:20,111 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 59031: exiting
    [junit] 2010-12-26 12:35:20,111 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 59031: exiting
    [junit] 2010-12-26 12:35:20,122 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 59031: exiting
    [junit] 2010-12-26 12:35:20,111 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 59031: exiting
    [junit] 2010-12-26 12:35:20,111 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-26 12:35:20,111 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59031
    [junit] 2010-12-26 12:35:20,122 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 59031: exiting
    [junit] 2010-12-26 12:35:20,122 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 59031: exiting
    [junit] 2010-12-26 12:35:20,115 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 59031: exiting
    [junit] 2010-12-26 12:35:20,115 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 59031: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 59 minutes 4 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
1 test failed.
FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 7dab154a18c3219b678101f98392083d but expecting 3811393b812403697534988bac5a86ee

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 7dab154a18c3219b678101f98392083d but expecting 3811393b812403697534988bac5a86ee
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 532 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/532/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1725 lines...]
  [javadoc]   for example:
  [javadoc]   <p> {@link #filesTotal}.set()
  [javadoc] Warning: incorrectly formatted @link in text: This class is for maintaining  the various NameNode activity statistics
  [javadoc]  and publishing them through the metrics interfaces.
  [javadoc]  This also registers the JMX MBean for RPC.
  [javadoc]  <p>
  [javadoc]  This class has a number of metrics variables that are publicly accessible;
  [javadoc]  these variables (objects) have methods to update their values;
  [javadoc]   for example:
  [javadoc]   <p> {@link #syncs}.inc()
  [javadoc]  finished
  [javadoc] JDiff: comparing the old and new APIs ...
  [javadoc]  Approximately 49% difference between the APIs
  [javadoc] JDiff: reading the comments in from file '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/user_comments_for_hadoop-hdfs_0.20.0_to_hadoop-hdfs_0.23.0-SNAPSHOT.xml'...
  [javadoc]  (this will be created)
  [javadoc] JDiff: generating HTML report into the file '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/changes.html' and the subdirectory '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/changes'
  [javadoc] Note: all the comments have been newly generated
  [javadoc] JDiff: writing the comments out to file '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/user_comments_for_hadoop-hdfs_0.20.0_to_hadoop-hdfs_0.23.0-SNAPSHOT.xml'...
  [javadoc] JDiff: finished (took 0s).

ivy-resolve-test:

ivy-retrieve-test:

compile-hdfs-test:
    [javac] Compiling 187 source files to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/hdfs/classes
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java:56: org.apache.hadoop.security.TestRefreshUserMappings.MockUnixGroupsMapping is not abstract and does not override abstract method cacheGroupsAdd(java.util.List<java.lang.String>) in org.apache.hadoop.security.GroupMappingServiceProvider
    [javac]   public static class MockUnixGroupsMapping implements GroupMappingServiceProvider {
    [javac]                 ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] 1 error

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:402: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:434: Compile failed; see the compiler error output for details.

Total time: 50 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.
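
This build and the next one died on the same compile error: GroupMappingServiceProvider in hadoop-common grew a cacheGroupsAdd(List<String>) method, so the mock in TestRefreshUserMappings no longer implements the whole interface. A minimal sketch of the fix for the nested class at TestRefreshUserMappings.java:56 (assuming the interface's other methods are getGroups and cacheGroupsRefresh, and that no-op caching is acceptable in a mock):

    // requires: import java.io.IOException; import java.util.Arrays;
    //           import java.util.List;
    //           import org.apache.hadoop.security.GroupMappingServiceProvider;
    public static class MockUnixGroupsMapping implements GroupMappingServiceProvider {
      @Override
      public List<String> getGroups(String user) throws IOException {
        return Arrays.asList(user + "-group");  // canned groups for the test
      }
      @Override
      public void cacheGroupsRefresh() throws IOException {
        // no-op in the mock
      }
      @Override
      public void cacheGroupsAdd(List<String> groups) throws IOException {
        // no-op in the mock; overriding this is what fixes the compile error
      }
    }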

Hadoop-Hdfs-trunk - Build # 531 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/531/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1745 lines...]
  [javadoc]  This also registers the JMX MBean for RPC.
  [javadoc]  <p>
  [javadoc]  This class has a number of metrics variables that are publicly accessible;
  [javadoc]  these variables (objects) have methods to update their values;
  [javadoc]   for example:
  [javadoc]   <p> {@link #syncs}.inc()
  [javadoc]  finished
  [javadoc] JDiff: comparing the old and new APIs ...
  [javadoc]  Approximately 49% difference between the APIs
  [javadoc] JDiff: reading the comments in from file '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/user_comments_for_hadoop-hdfs_0.20.0_to_hadoop-hdfs_0.23.0-SNAPSHOT.xml'...
  [javadoc]  (this will be created)
  [javadoc] JDiff: generating HTML report into the file '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/changes.html' and the subdirectory '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/changes'
  [javadoc] Note: all the comments have been newly generated
  [javadoc] JDiff: writing the comments out to file '/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/docs/jdiff/user_comments_for_hadoop-hdfs_0.20.0_to_hadoop-hdfs_0.23.0-SNAPSHOT.xml'...
  [javadoc] JDiff: finished (took 0s).

ivy-resolve-test:
[ivy:resolve] downloading https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/hadoop-common/0.23.0-SNAPSHOT/hadoop-common-0.23.0-20101224.005231-23.jar ...
[ivy:resolve] ......................................................................................................................................................................................................... (1364kB)
[ivy:resolve] .. (0kB)
[ivy:resolve] 	[SUCCESSFUL ] org.apache.hadoop#hadoop-common;0.23.0-SNAPSHOT!hadoop-common.jar (4332ms)

ivy-retrieve-test:

compile-hdfs-test:
    [javac] Compiling 187 source files to /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/hdfs/classes
    [javac] /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java:56: org.apache.hadoop.security.TestRefreshUserMappings.MockUnixGroupsMapping is not abstract and does not override abstract method cacheGroupsAdd(java.util.List<java.lang.String>) in org.apache.hadoop.security.GroupMappingServiceProvider
    [javac]   public static class MockUnixGroupsMapping implements GroupMappingServiceProvider {
    [javac]                 ^
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] 1 error

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:402: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:434: Compile failed; see the compiler error output for details.

Total time: 1 minute 4 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 530 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/530/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 655556 lines...]
    [junit] 2010-12-23 12:36:36,620 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-23 12:36:36,620 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2010-12-23 12:36:36,735 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 53369
    [junit] 2010-12-23 12:36:36,736 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 53369: exiting
    [junit] 2010-12-23 12:36:36,736 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-23 12:36:36,736 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:39149, storageID=DS-2023999281-127.0.1.1-39149-1293107785734, infoPort=33426, ipcPort=53369):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-23 12:36:36,737 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-23 12:36:36,737 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 53369
    [junit] 2010-12-23 12:36:36,738 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-23 12:36:36,783 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-23 12:36:36,839 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:39149, storageID=DS-2023999281-127.0.1.1-39149-1293107785734, infoPort=33426, ipcPort=53369):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-23 12:36:36,839 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 53369
    [junit] 2010-12-23 12:36:36,839 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-23 12:36:36,840 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-23 12:36:36,840 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-23 12:36:36,840 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-23 12:36:36,843 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-23 12:36:36,843 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-23 12:36:36,843 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 2 
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 57567
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 57567: exiting
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 57567: exiting
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 57567: exiting
    [junit] 2010-12-23 12:36:36,847 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 57567: exiting
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-23 12:36:36,845 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 57567: exiting
    [junit] 2010-12-23 12:36:36,847 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 57567: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.314 sec
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 57567: exiting
    [junit] 2010-12-23 12:36:36,846 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 57567

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:735: Tests failed!

Total time: 60 minutes 14 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
127.0.0.1:36178 is not an underUtilized node

Stack Trace:
junit.framework.AssertionFailedError: 127.0.0.1:36178 is not an underUtilized node
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:1012)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.initNodes(Balancer.java:954)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1497)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.twoNodeTest(TestBalancer.java:312)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brym(TestBalancer.java:328)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)
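
The assertion above comes from the balancer bucketing each datanode's utilization around the cluster average, plus or minus the configured threshold; a node counts as underutilized only when it sits more than the threshold below average. A hedged sketch of that classification (names and shape are illustrative, not the Balancer API; the real logic lives in Balancer.initNodes):

    public class BalancerBuckets {
        enum Bucket { OVER, ABOVE_AVG, BELOW_AVG, UNDER }

        // utilization values are fractions of capacity used, e.g. 0.45 = 45%
        static Bucket classify(double node, double avg, double threshold) {
            if (node > avg + threshold)  return Bucket.OVER;
            if (node > avg)              return Bucket.ABOVE_AVG;
            if (node >= avg - threshold) return Bucket.BELOW_AVG;
            return Bucket.UNDER;
        }

        public static void main(String[] args) {
            // avg 50% used, threshold 10 points: a 45% node is merely below
            // average, not underutilized, so asserting UNDER for it fails.
            System.out.println(classify(0.45, 0.50, 0.10)); // BELOW_AVG
        }
    }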


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
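
Both "Cannot lock storage" setUp failures point at a name directory still locked by an earlier cluster that was never shut down. For background, a minimal sketch of this locking style (illustrative; the real implementation is Storage$StorageDirectory.lock, and in_use.lock is the marker file HDFS storage directories use):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    public class StorageDirLock {
        // Take an exclusive lock on a marker file inside the storage
        // directory. tryLock() returns null when another process holds the
        // lock; within one JVM an overlapping lock instead throws
        // OverlappingFileLockException, which is equally fatal here.
        public static FileLock lock(File storageDir) throws IOException {
            RandomAccessFile file =
                    new RandomAccessFile(new File(storageDir, "in_use.lock"), "rws");
            FileLock lock = file.getChannel().tryLock();
            if (lock == null) {
                file.close();
                throw new IOException("Cannot lock storage " + storageDir
                        + ". The directory is already locked.");
            }
            return lock;
        }
    }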


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 35bda2a9eb132b8884e16781b373cc83 but expecting c19f5cb617e5823848b8bd2b191ab709

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 35bda2a9eb132b8884e16781b373cc83 but expecting c19f5cb617e5823848b8bd2b191ab709
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 529 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/529/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 776373 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-22 13:12:32,785 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-22 13:12:32,885 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-22 13:12:32,885 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:41676, storageID=DS-445206555-127.0.1.1-41676-1293023541804, infoPort=54512, ipcPort=48251):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-22 13:12:32,886 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 48251
    [junit] 2010-12-22 13:12:32,886 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-22 13:12:32,886 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-22 13:12:32,886 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-22 13:12:32,887 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-22 13:12:32,990 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-22 13:12:32,990 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 3 
    [junit] 2010-12-22 13:12:32,990 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-22 13:12:32,991 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 41964
    [junit] 2010-12-22 13:12:32,992 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 41964: exiting
    [junit] 2010-12-22 13:12:32,992 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 41964
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 41964: exiting
    [junit] 2010-12-22 13:12:32,993 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 41964: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.763 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 96 minutes 24 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqwp(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)

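Every TestFileConcurrentReader failure in this batch is the same resource problem wearing different hats: error=24 ("Too many open files", EMFILE) surfaces here as a FileNotFoundException while loading hdfs-default.xml, and in the builds below as failed forks of /bin/ls and du and a failed NIO selector open. The per-process file descriptor limit is exhausted, so whichever syscall needs a descriptor next is the one that fails. A quick Linux-only sketch for watching the JVM's descriptor count from inside a test (hypothetical helper, not part of the test suite):

    import java.io.File;

    public class FdCount {
        public static void main(String[] args) {
            // On Linux, each entry under /proc/self/fd is one open
            // descriptor of this JVM; a count close to `ulimit -n`
            // explains error=24 (EMFILE).
            File[] fds = new File("/proc/self/fd").listFiles();
            System.out.println("open file descriptors: "
                    + (fds == null ? "unknown (not Linux?)" : fds.length));
        }
    }

Sampling this between test cases would show whether the count climbs monotonically, i.e. whether the test leaks streams or sockets rather than merely peaking under load.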

REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
	at org.apache.hadoop.util.Shell.run(Shell.java:183)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:376)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:462)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:445)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1580)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
	at junit.framework.TestCase.runBare(TestCase.java:132)
	at junit.framework.TestResult$1.protect(TestResult.java:110)
	at junit.framework.TestResult.runProtected(TestResult.java:128)
	at junit.framework.TestResult.run(TestResult.java:113)
	at junit.framework.TestCase.run(TestCase.java:124)
	at junit.framework.TestSuite.runTest(TestSuite.java:232)
	at junit.framework.TestSuite.run(TestSuite.java:227)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
	... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
	at org.apache.hadoop.util.Shell.run(Shell.java:183)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:376)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:462)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:445)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1580)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1580)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 4c82739c1429171e8cef59b2d518a759 but expecting 4251937469ce1e214b54e9d2d1ee40a4

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 4c82739c1429171e8cef59b2d518a759 but expecting 4251937469ce1e214b54e9d2d1ee40a4
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 528 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/528/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 841373 lines...]
    [junit] 2010-12-21 13:23:19,658 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-21 13:23:19,658 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2010-12-21 13:23:19,760 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 35416
    [junit] 2010-12-21 13:23:19,760 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 35416: exiting
    [junit] 2010-12-21 13:23:19,760 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-21 13:23:19,760 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-21 13:23:19,760 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 35416
    [junit] 2010-12-21 13:23:19,760 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:50831, storageID=DS-1946037341-127.0.1.1-50831-1292937788838, infoPort=36930, ipcPort=35416):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-21 13:23:19,763 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-21 13:23:19,863 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-21 13:23:19,863 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:50831, storageID=DS-1946037341-127.0.1.1-50831-1292937788838, infoPort=36930, ipcPort=35416):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-21 13:23:19,864 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 35416
    [junit] 2010-12-21 13:23:19,864 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-21 13:23:19,864 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-21 13:23:19,864 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-21 13:23:19,865 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-21 13:23:19,969 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-21 13:23:19,969 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2010-12-21 13:23:19,969 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-21 13:23:19,970 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 34872
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 34872: exiting
    [junit] 2010-12-21 13:23:19,971 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 34872: exiting
    [junit] 2010-12-21 13:23:19,973 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-21 13:23:19,973 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 34872
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.233 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 107 minutes 12 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot run program "du": java.io.IOException: error=24, Too many open files

Stack Trace:
java.io.IOException: Cannot run program "du": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
	at org.apache.hadoop.util.Shell.run(Shell.java:183)
	at org.apache.hadoop.fs.DU.<init>(DU.java:57)
	at org.apache.hadoop.fs.DU.<init>(DU.java:67)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset$FSVolume.<init>(FSDataset.java:342)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.<init>(FSDataset.java:873)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initFsDataSet(DataNode.java:395)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:500)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2wjxr3fqwv(TestFileConcurrentReader.java:290)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite(TestFileConcurrentReader.java:289)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 49b1c766a3abae340a8a9182865daa83 but expecting d9c3bd1a2d0a232f264d907016ecb0b8

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 49b1c766a3abae340a8a9182865daa83 but expecting d9c3bd1a2d0a232f264d907016ecb0b8
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 527 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/527/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 330449 lines...]
    [junit] 2010-12-20 23:59:41,456 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:45864, storageID=DS-858089567-127.0.1.1-45864-1292888975184, infoPort=44821, ipcPort=57957) Starting thread to transfer block blk_7017067817949553972_1016 to 127.0.0.1:38343 
    [junit] 2010-12-20 23:59:41,456 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:45864, storageID=DS-858089567-127.0.1.1-45864-1292888975184, infoPort=44821, ipcPort=57957):Failed to transfer blk_7017067817949553972_1016 to 127.0.0.1:38343 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-20 23:59:42,932 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(940)) - BLOCK* ask 127.0.0.1:45864 to replicate blk_8736468865754296066_1019 to datanode(s) 127.0.0.1:38343
    [junit] 2010-12-20 23:59:44,438 INFO  datanode.DataNode (DataNode.java:transferBlock(1199)) - DatanodeRegistration(127.0.0.1:45864, storageID=DS-858089567-127.0.1.1-45864-1292888975184, infoPort=44821, ipcPort=57957) Starting thread to transfer block blk_8736468865754296066_1019 to 127.0.0.1:38343 
    [junit] 2010-12-20 23:59:44,439 WARN  datanode.DataNode (DataNode.java:run(1388)) - DatanodeRegistration(127.0.0.1:45864, storageID=DS-858089567-127.0.1.1-45864-1292888975184, infoPort=44821, ipcPort=57957):Failed to transfer blk_8736468865754296066_1019 to 127.0.0.1:38343 got java.net.ConnectException: Connection refused
    [junit] 	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit] 	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit] 	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit] 	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1356)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-21 00:02:36,824 DEBUG datanode.DataNode (BlockSender.java:<init>(142)) - block=blk_2554629569528528303_1011, replica=FinalizedReplica, blk_2554629569528528303_1011, FINALIZED
    [junit]   getNumBytes()     = 58
    [junit]   getBytesOnDisk()  = 58
    [junit]   getVisibleLength()= 58
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/blk_2554629569528528303
    [junit]   unlinked=false
    [junit] 2010-12-21 00:02:36,824 DEBUG datanode.DataNode (BlockSender.java:<init>(237)) - replica=FinalizedReplica, blk_2554629569528528303_1011, FINALIZED
    [junit]   getNumBytes()     = 58
    [junit]   getBytesOnDisk()  = 58
    [junit]   getVisibleLength()= 58
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/blk_2554629569528528303
    [junit]   unlinked=false
    [junit] 2010-12-21 00:02:36,825 INFO  datanode.DataBlockScanner (DataBlockScanner.java:verifyBlock(447)) - Verification succeeded for blk_2554629569528528303_1011
    [junit] 2010-12-21 00:03:17,842 DEBUG datanode.DataNode (BlockSender.java:<init>(142)) - block=blk_4163141904047184515_1017, replica=FinalizedReplica, blk_4163141904047184515_1017, FINALIZED
    [junit]   getNumBytes()     = 58
    [junit]   getBytesOnDisk()  = 58
    [junit]   getVisibleLength()= 58
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/blk_4163141904047184515
    [junit]   unlinked=false
    [junit] 2010-12-21 00:03:17,842 DEBUG datanode.DataNode (BlockSender.java:<init>(237)) - replica=FinalizedReplica, blk_4163141904047184515_1017, FINALIZED
    [junit]   getNumBytes()     = 58
    [junit]   getBytesOnDisk()  = 58
    [junit]   getVisibleLength()= 58
    [junit]   getVolume()       = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized
    [junit]   getBlockFile()    = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/blk_4163141904047184515
    [junit]   unlinked=false
    [junit] 2010-12-21 00:03:17,842 INFO  datanode.DataBlockScanner (DataBlockScanner.java:verifyBlock(447)) - Verification succeeded for blk_4163141904047184515_1017
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml was length 0

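A "was length 0" entry is not a test assertion at all: it is the report parser, not the test, choking on an empty result file. The console above ends with "Build timed out. Aborting", so the test that was running when Hudson killed the build left behind a TEST-*.xml it never got to write. A trivial sketch for spotting such casualties in a results directory (hypothetical helper):

    import java.io.File;

    public class EmptyReportScan {
        public static void main(String[] args) {
            // args[0] = test results dir, e.g. build/test
            File[] entries = new File(args[0]).listFiles();
            if (entries == null) return;
            for (File f : entries) {
                String n = f.getName();
                if (n.startsWith("TEST-") && n.endsWith(".xml")
                        && f.length() == 0) {
                    System.out.println("aborted mid-run: " + n);
                }
            }
        }
    }

An empty report therefore identifies the test that was in flight at the timeout, which in this build was TestFileCreationClient.
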
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of fe79723374828bbaed186d67bababcf5 but expecting 20f04f89cced75c18950e4a3a4eb2383

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of fe79723374828bbaed186d67bababcf5 but expecting 20f04f89cced75c18950e4a3a4eb2383
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 526 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/526/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 998 lines...]
     [echo] contrib: thriftfs

clean-fi:

clean-sign:

clean:

clean-contrib:

clean:

check-libhdfs-fuse:

clean:
Trying to override old definition of task macro_tar

clean:
     [echo] contrib: hdfsproxy

clean:
     [echo] contrib: thriftfs

clean-fi:

clean-sign:

clean:

clean-cache:
   [delete] Deleting directory /homes/hudson/.ivy2/cache/org.apache.hadoop

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:1211: Unable to delete file /homes/hudson/.ivy2/cache/org.apache.hadoop/avro/jars/.nfs000000000542200800000003

Total time: 1 second


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/*.jar': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
mv: cannot stat `build/docs/api': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

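The clean-cache failure in this build ("Unable to delete file .../.nfs000000000542200800000003") is NFS silly-rename residue: when a client deletes a file that some process still has open, the NFS server renames it to a hidden .nfsXXXX name and only removes it once the last handle closes. Ant's <delete> therefore cannot empty the Ivy cache while any JVM from a previous build still holds a jar open, and the same condition explains the "Unable to delete directory" variant in the next build below. A small recursive scan for such residue (hypothetical helper):

    import java.io.File;

    public class NfsResidueScan {
        public static void main(String[] args) {
            scan(new File(args[0])); // e.g. /homes/hudson/.ivy2/cache
        }
        static void scan(File dir) {
            File[] entries = dir.listFiles();
            if (entries == null) return;
            for (File f : entries) {
                if (f.isDirectory()) {
                    scan(f);
                } else if (f.getName().startsWith(".nfs")) {
                    // still open somewhere: deletion will keep failing
                    // until the holding process exits
                    System.out.println(f);
                }
            }
        }
    }
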
Hadoop-Hdfs-trunk - Build # 525 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/525/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 998 lines...]
     [echo] contrib: thriftfs

clean-fi:

clean-sign:

clean:

clean-contrib:

clean:

check-libhdfs-fuse:

clean:
Trying to override old definition of task macro_tar

clean:
     [echo] contrib: hdfsproxy

clean:
     [echo] contrib: thriftfs

clean-fi:

clean-sign:

clean:

clean-cache:
   [delete] Deleting directory /homes/hudson/.ivy2/cache/org.apache.hadoop

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:1211: Unable to delete directory /homes/hudson/.ivy2/cache/org.apache.hadoop

Total time: 1 second


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/*.jar': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
mv: cannot stat `build/docs/api': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.

Hadoop-Hdfs-trunk - Build # 524 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/524/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 562367 lines...]
    [junit] 2010-12-18 12:19:07,550 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3545631296030892058_1013 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir58/blk_3545631296030892058 for deletion
    [junit] 2010-12-18 12:19:07,550 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3455264697769994764_1051 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir24/blk_3455264697769994764
    [junit] 2010-12-18 12:19:07,550 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3595433458321327697_1003 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir50/blk_3595433458321327697 for deletion
    [junit] 2010-12-18 12:19:07,551 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3545631296030892058_1013 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir58/blk_3545631296030892058
    [junit] 2010-12-18 12:19:07,551 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3743016559550481858_1060 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/blk_3743016559550481858 for deletion
    [junit] 2010-12-18 12:19:07,551 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3595433458321327697_1003 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir50/blk_3595433458321327697
    [junit] 2010-12-18 12:19:07,551 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_3910365645685547700_1063 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir33/blk_3910365645685547700 for deletion
    [junit] 2010-12-18 12:19:07,551 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3743016559550481858_1060 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir31/blk_3743016559550481858
    [junit] 2010-12-18 12:19:07,552 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_4939098437181070526_1089 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir26/subdir61/blk_4939098437181070526 for deletion
    [junit] 2010-12-18 12:19:07,552 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_3910365645685547700_1063 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir33/blk_3910365645685547700
    [junit] 2010-12-18 12:19:07,552 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_4987212060163510157_1068 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir37/blk_4987212060163510157 for deletion
    [junit] 2010-12-18 12:19:07,552 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4939098437181070526_1089 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir26/subdir61/blk_4939098437181070526
    [junit] 2010-12-18 12:19:07,552 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_4987212060163510157_1068 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir37/blk_4987212060163510157
    [junit] 2010-12-18 12:19:07,552 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5166418008350096511_1087 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir26/subdir60/blk_5166418008350096511 for deletion
    [junit] 2010-12-18 12:19:07,553 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5418891647848091001_1065 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir35/blk_5418891647848091001 for deletion
    [junit] 2010-12-18 12:19:07,553 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5166418008350096511_1087 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir26/subdir60/blk_5166418008350096511
    [junit] 2010-12-18 12:19:07,553 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5418891647848091001_1065 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir35/blk_5418891647848091001
    [junit] 2010-12-18 12:19:07,553 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_5976249898144668751_1035 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir56/blk_5976249898144668751 for deletion
    [junit] 2010-12-18 12:19:07,554 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6242682924716876725_1094 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir16/subdir15/blk_6242682924716876725 for deletion
    [junit] 2010-12-18 12:19:07,554 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_5976249898144668751_1035 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir56/blk_5976249898144668751
    [junit] 2010-12-18 12:19:07,554 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6242682924716876725_1094 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir16/subdir15/blk_6242682924716876725
    [junit] 2010-12-18 12:19:07,554 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6417672471917492459_1074 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir23/blk_6417672471917492459 for deletion
    [junit] 2010-12-18 12:19:07,554 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6494435325135062124_1019 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir44/blk_6494435325135062124 for deletion
    [junit] 2010-12-18 12:19:07,554 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6417672471917492459_1074 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir23/blk_6417672471917492459
    [junit] 2010-12-18 12:19:07,555 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6494435325135062124_1019 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir44/blk_6494435325135062124
    [junit] 2010-12-18 12:19:07,555 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6534644844089116729_1090 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir26/subdir63/blk_6534644844089116729 for deletion
    [junit] 2010-12-18 12:19:07,555 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_6953903060091692675_1021 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir1/blk_6953903060091692675 for deletion
    [junit] 2010-12-18 12:19:07,555 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6534644844089116729_1090 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir26/subdir63/blk_6534644844089116729
    [junit] 2010-12-18 12:19:07,555 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_6953903060091692675_1021 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir1/blk_6953903060091692675
    [junit] 2010-12-18 12:19:07,555 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7178872197128046686_1082 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir48/blk_7178872197128046686 for deletion
    [junit] 2010-12-18 12:19:07,556 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7683627936677494880_1053 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir7/blk_7683627936677494880 for deletion
    [junit] 2010-12-18 12:19:07,556 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7178872197128046686_1082 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir48/blk_7178872197128046686
    [junit] 2010-12-18 12:19:07,556 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7683627936677494880_1053 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir7/blk_7683627936677494880
    [junit] 2010-12-18 12:19:07,556 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_7776601040508080192_1011 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir38/blk_7776601040508080192 for deletion
    [junit] 2010-12-18 12:19:07,557 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8166239740742971741_1054 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir27/blk_8166239740742971741 for deletion
    [junit] 2010-12-18 12:19:07,557 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_7776601040508080192_1011 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir38/blk_7776601040508080192
    [junit] 2010-12-18 12:19:07,557 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8166239740742971741_1054 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir27/blk_8166239740742971741
    [junit] 2010-12-18 12:19:07,557 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8301356783737485170_1033 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir10/blk_8301356783737485170 for deletion
    [junit] 2010-12-18 12:19:07,557 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8452717871598163582_1069 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir19/blk_8452717871598163582 for deletion
    [junit] 2010-12-18 12:19:07,557 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8301356783737485170_1033 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir10/blk_8301356783737485170
    [junit] 2010-12-18 12:19:07,558 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8452717871598163582_1069 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir19/blk_8452717871598163582
    [junit] 2010-12-18 12:19:07,558 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8820831095681698094_1086 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir16/subdir9/blk_8820831095681698094 for deletion
    [junit] 2010-12-18 12:19:07,558 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_8897631248351143416_1030 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir53/blk_8897631248351143416 for deletion
    [junit] 2010-12-18 12:19:07,558 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8820831095681698094_1086 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir16/subdir9/blk_8820831095681698094
    [junit] 2010-12-18 12:19:07,558 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_9106812904785972780_1061 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir13/blk_9106812904785972780 for deletion
    [junit] 2010-12-18 12:19:07,558 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_8897631248351143416_1030 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir53/blk_8897631248351143416
    [junit] 2010-12-18 12:19:07,559 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_9106812904785972780_1061 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/finalized/subdir13/blk_9106812904785972780
    [junit] 2010-12-18 12:19:07,559 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:deleteAsync(152)) - Scheduling block blk_9168316300231738676_1046 file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir20/blk_9168316300231738676 for deletion
    [junit] 2010-12-18 12:19:07,559 INFO  datanode.DataNode (FSDatasetAsyncDiskService.java:run(198)) - Deleted block blk_9168316300231738676_1046 at file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data2/current/finalized/subdir20/blk_9168316300231738676
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete.xml was length 0

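A zero-length TEST-*.xml report like this usually means the forked test JVM was killed before the JUnit XML formatter could flush its output -- consistent with the "Build timed out. Aborting" line in the console above. A minimal sketch that flags such truncated reports after a run (the report directory and class name are assumptions for illustration, not part of the build):

    import java.io.File;

    public class EmptyReportCheck {
        public static void main(String[] args) {
            // Directory where Ant's junit task writes TEST-*.xml reports.
            File reportDir = new File(args.length > 0 ? args[0] : "build/test");
            File[] files = reportDir.listFiles();
            if (files == null) return;
            for (File f : files) {
                String name = f.getName();
                // A 0-byte report means the writer died mid-run.
                if (name.startsWith("TEST-") && name.endsWith(".xml")
                        && f.length() == 0) {
                    System.out.println("Truncated report (JVM likely killed): " + f);
                }
            }
        }
    }
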
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

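The "already locked" failure happens when a previous MiniDFSCluster in the same test JVM was not fully shut down: Storage$StorageDirectory.lock takes an exclusive lock on an in_use.lock file inside the storage directory, so a second cluster reusing build/test/data/dfs/name1 cannot acquire it. A minimal sketch of that file-lock idiom (the real Hadoop implementation differs in detail; the error text here is copied from the report above for illustration):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;
    import java.nio.channels.OverlappingFileLockException;

    public class StorageLockSketch {
        public static FileLock lock(File storageDir) throws IOException {
            File lockFile = new File(storageDir, "in_use.lock");
            RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
            FileLock lock;
            try {
                // tryLock returns null if another process already holds the lock
                lock = raf.getChannel().tryLock();
            } catch (OverlappingFileLockException e) {
                // this JVM already holds it, e.g. a cluster that never shut down
                lock = null;
            }
            if (lock == null) {
                raf.close();
                throw new IOException("Cannot lock storage " + storageDir
                        + ". The directory is already locked.");
            }
            return lock;
        }
    }
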

FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)

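"Too many open files" here is the per-process file-descriptor limit being hit: each failed setUp leaks sockets and lock files from the previous MiniDFSCluster until even Selector.open() cannot create an epoll instance. On Linux the leak can be confirmed by watching the descriptor count across start/stop cycles; a minimal sketch (Linux-only, /proc-based, class name illustrative):

    import java.io.File;

    public class FdCount {
        public static void main(String[] args) {
            // Each entry under /proc/self/fd is one open descriptor of this JVM.
            File[] fds = new File("/proc/self/fd").listFiles();
            System.out.println("open fds: " + (fds == null ? "unknown" : fds.length));
        }
    }
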

FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6654eb70b11f0d11fdcb61c7ea2b9f44 but expecting c1b340ef555a0088c7db544e231b816a

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 6654eb70b11f0d11fdcb61c7ea2b9f44 but expecting c1b340ef555a0088c7db544e231b816a
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)

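The checksum failure means the fsimage the SecondaryNameNode downloaded does not match the MD5 digest the NameNode advertised for it, i.e. the copy under dfs/secondary was truncated or corrupted. To compare a suspect copy by hand, one can recompute its MD5 the same general way; a minimal sketch (standard-library only, class name and path handling illustrative):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public class Md5OfFile {
        public static String md5Hex(String path)
                throws IOException, NoSuchAlgorithmException {
            MessageDigest md = MessageDigest.getInstance("MD5");
            byte[] buf = new byte[8192];
            InputStream in = new FileInputStream(path);
            try {
                int n;
                while ((n = in.read(buf)) != -1) {
                    md.update(buf, 0, n);   // feed the whole file through the digest
                }
            } finally {
                in.close();
            }
            StringBuilder sb = new StringBuilder();
            for (byte b : md.digest()) {
                sb.append(String.format("%02x", b));   // hex, like the log above
            }
            return sb.toString();
        }
    }
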



Hadoop-Hdfs-trunk - Build # 523 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/523/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 787064 lines...]
    [junit] 2010-12-17 13:24:14,217 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-17 13:24:14,218 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-17 13:24:14,218 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(786)) - Shutting down DataNode 0
    [junit] 2010-12-17 13:24:14,320 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 36468
    [junit] 2010-12-17 13:24:14,320 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 36468: exiting
    [junit] 2010-12-17 13:24:14,321 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 36468
    [junit] 2010-12-17 13:24:14,321 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-17 13:24:14,321 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:39023, storageID=DS-1608461355-127.0.1.1-39023-1292592243421, infoPort=54221, ipcPort=36468):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-17 13:24:14,321 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-17 13:24:14,422 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-17 13:24:14,422 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:39023, storageID=DS-1608461355-127.0.1.1-39023-1292592243421, infoPort=54221, ipcPort=36468):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-17 13:24:14,422 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 36468
    [junit] 2010-12-17 13:24:14,423 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-17 13:24:14,423 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-17 13:24:14,423 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-17 13:24:14,423 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-17 13:24:14,525 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-17 13:24:14,526 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2010-12-17 13:24:14,526 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-17 13:24:14,527 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 52977
    [junit] 2010-12-17 13:24:14,527 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52977: exiting
    [junit] 2010-12-17 13:24:14,527 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 52977: exiting
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52977
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 52977: exiting
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 52977: exiting
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 52977: exiting
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 52977: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.517 sec
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 52977: exiting
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 52977: exiting
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 52977: exiting
    [junit] 2010-12-17 13:24:14,528 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 52977: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 108 minutes 13 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of b2f34e516868ebb6d834088e4f0fb18e but expecting 303a46198e460cbbc20a873d59d8a51a

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of b2f34e516868ebb6d834088e4f0fb18e but expecting 303a46198e460cbbc20a873d59d8a51a
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 522 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/522/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 647499 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-16 12:35:26,713 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-16 12:35:26,814 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-16 12:35:26,814 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:47866, storageID=DS-1618016816-127.0.1.1-47866-1292502915675, infoPort=58840, ipcPort=48310):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-16 12:35:26,814 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 48310
    [junit] 2010-12-16 12:35:26,814 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-16 12:35:26,815 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-16 12:35:26,815 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-16 12:35:26,815 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-16 12:35:26,917 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-16 12:35:26,918 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2010-12-16 12:35:26,918 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-16 12:35:26,919 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 52662
    [junit] 2010-12-16 12:35:26,919 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52662
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 52662: exiting
    [junit] 2010-12-16 12:35:26,920 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 52662: exiting
    [junit] 2010-12-16 12:35:26,921 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.359 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:722: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:488: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:674: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:637: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:705: Tests failed!

Total time: 59 minutes 18 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi01_a

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.TestFiHFlush.runDiskErrorTest(TestFiHFlush.java:56)
	at org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi01_a(TestFiHFlush.java:72)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 646dcca0b5757ec6c7c378e5b8639312 but expecting daa27d46253e72e17527403594cef6d5

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 646dcca0b5757ec6c7c378e5b8639312 but expecting daa27d46253e72e17527403594cef6d5
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 521 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/521/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 821928 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-15 13:24:10,823 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-15 13:24:10,924 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-15 13:24:10,924 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:34915, storageID=DS-1460572231-127.0.1.1-34915-1292419439913, infoPort=56390, ipcPort=42973):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-15 13:24:10,924 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 42973
    [junit] 2010-12-15 13:24:10,924 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-15 13:24:10,925 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-15 13:24:10,925 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-15 13:24:10,926 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-15 13:24:11,027 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-15 13:24:11,028 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 7 3 
    [junit] 2010-12-15 13:24:11,028 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-15 13:24:11,029 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 51182
    [junit] 2010-12-15 13:24:11,029 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 51182: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.215 sec
    [junit] 2010-12-15 13:24:11,029 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 51182: exiting
    [junit] 2010-12-15 13:24:11,032 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 51182: exiting
    [junit] 2010-12-15 13:24:11,030 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 51182
    [junit] 2010-12-15 13:24:11,044 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 51182: exiting
    [junit] 2010-12-15 13:24:11,030 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-15 13:24:11,045 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 51182: exiting
    [junit] 2010-12-15 13:24:11,030 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 51182: exiting
    [junit] 2010-12-15 13:24:11,030 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 51182: exiting
    [junit] 2010-12-15 13:24:11,044 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 51182: exiting
    [junit] 2010-12-15 13:24:11,044 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 51182: exiting
    [junit] 2010-12-15 13:24:11,044 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 51182: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 108 minutes 14 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:451)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.EPollArrayWrapper.epollCreate(Native Method)
	at sun.nio.ch.EPollArrayWrapper.<init>(EPollArrayWrapper.java:68)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:52)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:630)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:464)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:186)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:178)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of ea0c381daf031b66603e1e64deda0a33 but expecting db35a63d2e6e5c3efbe94772b0cc1bc6

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of ea0c381daf031b66603e1e64deda0a33 but expecting db35a63d2e6e5c3efbe94772b0cc1bc6
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tjd(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 520 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/520/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 809539 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-14 13:22:23,275 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-14 13:22:23,376 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-14 13:22:23,376 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:57551, storageID=DS-1642533549-127.0.1.1-57551-1292332932395, infoPort=53979, ipcPort=46629):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-14 13:22:23,376 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 46629
    [junit] 2010-12-14 13:22:23,376 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-14 13:22:23,377 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-14 13:22:23,377 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-14 13:22:23,377 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-14 13:22:23,479 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-14 13:22:23,479 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 4 
    [junit] 2010-12-14 13:22:23,479 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException. java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-14 13:22:23,481 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 38151
    [junit] 2010-12-14 13:22:23,482 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 38151: exiting
    [junit] 2010-12-14 13:22:23,482 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 38151: exiting
    [junit] 2010-12-14 13:22:23,482 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38151
    [junit] 2010-12-14 13:22:23,482 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 38151: exiting
    [junit] 2010-12-14 13:22:23,483 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 38151: exiting
    [junit] 2010-12-14 13:22:23,483 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 38151: exiting
    [junit] 2010-12-14 13:22:23,482 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-14 13:22:23,484 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38151: exiting
    [junit] 2010-12-14 13:22:23,484 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 38151: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.2 sec
    [junit] 2010-12-14 13:22:23,483 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 38151: exiting
    [junit] 2010-12-14 13:22:23,483 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 38151: exiting
    [junit] 2010-12-14 13:22:23,482 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 38151: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 106 minutes 29 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqtz(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)

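This is the same descriptor exhaustion as in the earlier builds, surfacing one step earlier: with no free descriptors left, Configuration cannot even open hdfs-default.xml from the classpath, so the failure is reported as a FileNotFoundException rather than a Selector.open() error.
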

FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1ffca26376ad4f334c98add26d98402f but expecting 16e9b295925867c8073b34772c2cbf3d

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 1ffca26376ad4f334c98add26d98402f but expecting 16e9b295925867c8073b34772c2cbf3d
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tgn(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
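
The TestStorageRestore failure is a digest mismatch on the secondary's checkpoint image: the check amounts to recomputing an MD5 over the on-disk fsimage and comparing it against the digest recorded when the image was saved, so a mismatch means the file changed in between, whether by truncation, a concurrent writer, or a stale file left by an earlier run. A standalone sketch of the recomputation (class name and buffer size are illustrative):

    import java.io.FileInputStream;
    import java.security.MessageDigest;

    public class ImageMd5 {
      /** Recompute a file's MD5 the way a saved image digest is re-verified. */
      public static String md5Of(String path) throws Exception {
        MessageDigest md = MessageDigest.getInstance("MD5");
        FileInputStream in = new FileInputStream(path);
        try {
          byte[] buf = new byte[8192];
          for (int n = in.read(buf); n > 0; n = in.read(buf)) {
            md.update(buf, 0, n);
          }
        } finally {
          in.close();
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
          hex.append(String.format("%02x", b));  // lowercase hex, as in the log
        }
        return hex.toString();
      }
    }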




Hadoop-Hdfs-trunk - Build # 519 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/519/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 688600 lines...]
    [junit] 2010-12-13 13:25:14,531 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(770)) - Shutting down DataNode 0
    [junit] 2010-12-13 13:25:14,633 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 54175
    [junit] 2010-12-13 13:25:14,633 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 54175: exiting
    [junit] 2010-12-13 13:25:14,634 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 54175
    [junit] 2010-12-13 13:25:14,634 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-13 13:25:14,634 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:37035, storageID=DS-2097397539-127.0.1.1-37035-1292246703713, infoPort=57042, ipcPort=54175):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-13 13:25:14,634 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-13 13:25:14,735 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-13 13:25:14,736 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:37035, storageID=DS-2097397539-127.0.1.1-37035-1292246703713, infoPort=57042, ipcPort=54175):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-13 13:25:14,736 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 54175
    [junit] 2010-12-13 13:25:14,736 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-13 13:25:14,736 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-13 13:25:14,736 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-13 13:25:14,737 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-13 13:25:14,839 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-13 13:25:14,839 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-13 13:25:14,840 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 2 
    [junit] 2010-12-13 13:25:14,841 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 37066
    [junit] 2010-12-13 13:25:14,841 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 37066: exiting
    [junit] 2010-12-13 13:25:14,841 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 37066: exiting
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 37066: exiting
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 37066: exiting
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 37066: exiting
    [junit] 2010-12-13 13:25:14,841 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 37066: exiting
    [junit] 2010-12-13 13:25:14,843 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 37066: exiting
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 37066: exiting
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 37066: exiting
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 37066: exiting
    [junit] 2010-12-13 13:25:14,842 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37066
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.293 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:722: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:488: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:680: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:637: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:705: Tests failed!

Total time: 108 minutes 47 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
7 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi02_a

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.TestFiHFlush.runDiskErrorTest(TestFiHFlush.java:56)
	at org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi02_a(TestFiHFlush.java:114)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
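
The "directory is already locked" errors are the other face of the same leak: each HDFS storage directory is guarded by an exclusive lock on an in_use.lock file, and a cluster that was never shut down still holds it, so the next test cannot format name1. A simplified reconstruction of the locking pattern (not the Hadoop source itself):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    public class StorageLock {
      /** Try to take the exclusive per-directory lock; throw if it is held. */
      public static FileLock tryLock(File storageDir) throws IOException {
        RandomAccessFile file =
            new RandomAccessFile(new File(storageDir, "in_use.lock"), "rws");
        // tryLock() returns null when another process holds the lock; a second
        // attempt from the same JVM throws OverlappingFileLockException, which
        // is what a leaked in-process MiniDFSCluster provokes here.
        FileLock lock = file.getChannel().tryLock();
        if (lock == null) {
          file.close();
          throw new IOException("Cannot lock storage " + storageDir
              + ". The directory is already locked.");
        }
        return lock;
      }
    }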


REGRESSION:  org.apache.hadoop.hdfs.TestLargeBlock.testLargeBlockSize

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


REGRESSION:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Timeout occurred. Please note the time in the report does not reflect the time until the timeout.

Stack Trace:
junit.framework.AssertionFailedError: Timeout occurred. Please note the time in the report does not reflect the time until the timeout.


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of bb061d260e655fc5f377934a0545e1e1 but expecting 21eebd450d9bcc48f4f256ddc67aaba4

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of bb061d260e655fc5f377934a0545e1e1 but expecting 21eebd450d9bcc48f4f256ddc67aaba4
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tgn(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 518 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/518/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 816702 lines...]
    [junit] 2010-12-12 13:21:07,914 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-12 13:21:07,915 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(770)) - Shutting down DataNode 0
    [junit] 2010-12-12 13:21:08,016 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 37045
    [junit] 2010-12-12 13:21:08,017 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37045
    [junit] 2010-12-12 13:21:08,017 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-12 13:21:08,017 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-12 13:21:08,017 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:59958, storageID=DS-1064452434-127.0.1.1-59958-1292160057073, infoPort=60597, ipcPort=37045):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-12 13:21:08,018 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 37045: exiting
    [junit] 2010-12-12 13:21:08,019 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-12 13:21:08,120 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-12 13:21:08,121 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:59958, storageID=DS-1064452434-127.0.1.1-59958-1292160057073, infoPort=60597, ipcPort=37045):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-12 13:21:08,121 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 37045
    [junit] 2010-12-12 13:21:08,121 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-12 13:21:08,121 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-12 13:21:08,121 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-12 13:21:08,122 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-12 13:21:08,224 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-12 13:21:08,224 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 6 3 
    [junit] 2010-12-12 13:21:08,224 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-12 13:21:08,226 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 48603
    [junit] 2010-12-12 13:21:08,226 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 48603: exiting
    [junit] 2010-12-12 13:21:08,226 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 48603: exiting
    [junit] 2010-12-12 13:21:08,226 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 48603: exiting
    [junit] 2010-12-12 13:21:08,227 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 48603: exiting
    [junit] 2010-12-12 13:21:08,226 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 48603: exiting
    [junit] 2010-12-12 13:21:08,227 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 48603: exiting
    [junit] 2010-12-12 13:21:08,226 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 48603: exiting
    [junit] 2010-12-12 13:21:08,227 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 48603: exiting
    [junit] 2010-12-12 13:21:08,226 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 48603
    [junit] 2010-12-12 13:21:08,227 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 48603: exiting
    [junit] 2010-12-12 13:21:08,227 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 48603: exiting
    [junit] 2010-12-12 13:21:08,227 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.419 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 105 minutes 16 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
2 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of d502717e6a085762b3b75192b560e592 but expecting 52fded5a8489002af197f5d00cdf97f7

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of d502717e6a085762b3b75192b560e592 but expecting 52fded5a8489002af197f5d00cdf97f7
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tgn(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 517 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/517/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 826746 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-11 13:21:11,346 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-11 13:21:11,411 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-11 13:21:11,446 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:60926, storageID=DS-217843813-127.0.1.1-60926-1292073660370, infoPort=42538, ipcPort=56175):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-11 13:21:11,446 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 56175
    [junit] 2010-12-11 13:21:11,447 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-11 13:21:11,447 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-11 13:21:11,447 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-11 13:21:11,447 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-11 13:21:11,549 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-11 13:21:11,549 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4 
    [junit] 2010-12-11 13:21:11,550 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 38934
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 38934: exiting
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 38934: exiting
    [junit] 2010-12-11 13:21:11,554 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 38934: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.17 sec
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 38934: exiting
    [junit] 2010-12-11 13:21:11,552 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-11 13:21:11,551 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 38934
    [junit] 2010-12-11 13:21:11,554 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 38934: exiting
    [junit] 2010-12-11 13:21:11,554 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 38934: exiting
    [junit] 2010-12-11 13:21:11,553 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 38934: exiting

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 105 minutes 22 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jqf1(TestBlockReport.java:414)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)
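
blockReport_08 asserts a pending-replication count that races against the ReplicationMonitor, so on a slow slave it can observe 1 before the second replication is scheduled. A common remedy, sketched here with a hypothetical helper (name, interval, and usage are not from the Hadoop tree), is to poll for the expected state up to a deadline instead of asserting at one instant:

    import java.util.concurrent.Callable;

    public class WaitUtil {
      /** Poll until check returns true, failing after timeoutMs. */
      public static void waitFor(long timeoutMs, Callable<Boolean> check)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.call()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError(
                "condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(100);  // re-check at a coarse interval
        }
      }
    }

The test would then wait for the pending count to reach 2 rather than assert it immediately after triggering the block report.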


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqtz(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c473986841e7cc311c1abf192dea4637 but expecting ead7e9810b7bfce73a3e69e4661b15aa

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of c473986841e7cc311c1abf192dea4637 but expecting ead7e9810b7bfce73a3e69e4661b15aa
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tgn(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 516 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/516/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 780060 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-10 13:22:33,759 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-10 13:22:33,859 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-10 13:22:33,860 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:36467, storageID=DS-533718375-127.0.1.1-36467-1291987342724, infoPort=47410, ipcPort=49222):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-10 13:22:33,860 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 49222
    [junit] 2010-12-10 13:22:33,860 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-10 13:22:33,860 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-10 13:22:33,860 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-10 13:22:33,861 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-10 13:22:33,963 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-10 13:22:33,963 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 3 
    [junit] 2010-12-10 13:22:33,963 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-10 13:22:33,964 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 56298
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 56298
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 56298: exiting
    [junit] 2010-12-10 13:22:33,965 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.359 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 106 minutes 50 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
4 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
	at org.apache.hadoop.util.Shell.run(Shell.java:183)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:376)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:462)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:445)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1580)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
	at junit.framework.TestCase.runBare(TestCase.java:132)
	at junit.framework.TestResult$1.protect(TestResult.java:110)
	at junit.framework.TestResult.runProtected(TestResult.java:128)
	at junit.framework.TestResult.run(TestResult.java:113)
	at junit.framework.TestCase.run(TestCase.java:124)
	at junit.framework.TestSuite.runTest(TestSuite.java:232)
	at junit.framework.TestSuite.run(TestSuite.java:227)
	at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
	at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
	at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
	... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
	at org.apache.hadoop.util.Shell.run(Shell.java:183)
	at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:376)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:462)
	at org.apache.hadoop.util.Shell.execCommand(Shell.java:445)
	at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:565)
	at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:49)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:491)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1580)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:516)
	at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:466)
	at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1580)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
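
By this point the leak has progressed far enough that even fork/exec of /bin/ls fails with error=24 (EMFILE), so the permission probe itself cannot run. On the Linux slaves these jobs run on, the quickest in-JVM diagnostic is to count entries under /proc/self/fd between test cases (Linux-specific sketch):

    import java.io.File;

    public class FdCount {
      public static void main(String[] args) {
        // Each entry under /proc/self/fd is one descriptor open in this JVM;
        // logging the count between cases shows where it climbs toward the
        // ulimit that produces "Too many open files".
        File[] fds = new File("/proc/self/fd").listFiles();
        System.out.println("open fds: " + (fds == null ? "n/a" : fds.length));
      }
    }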


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqtz(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 021344c3204fb1bb7e274d192a820e61 but expecting 9af8b5ee0db1a53463b07b65079c2eb9

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 021344c3204fb1bb7e274d192a820e61 but expecting 9af8b5ee0db1a53463b07b65079c2eb9
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4tgn(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 515 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/515/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 683811 lines...]
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-09 12:45:28,560 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-09 12:45:28,661 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-09 12:45:28,661 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:49781, storageID=DS-288512829-127.0.1.1-49781-1291898717691, infoPort=58639, ipcPort=47283):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-09 12:45:28,661 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 47283
    [junit] 2010-12-09 12:45:28,661 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-09 12:45:28,662 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-09 12:45:28,662 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-09 12:45:28,662 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-09 12:45:28,764 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-09 12:45:28,765 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 10 2 
    [junit] 2010-12-09 12:45:28,764 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-09 12:45:28,766 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 52379
    [junit] 2010-12-09 12:45:28,766 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52379: exiting
    [junit] 2010-12-09 12:45:28,766 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 52379: exiting
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52379
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 52379: exiting
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 52379: exiting
    [junit] 2010-12-09 12:45:28,768 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 52379: exiting
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 52379: exiting
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 52379: exiting
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 52379: exiting
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 52379: exiting
    [junit] 2010-12-09 12:45:28,767 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 52379: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.335 sec

checkfailure:
    [touch] Creating /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/testsfailed

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:722: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:488: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/src/test/aop/build/aop.xml:230: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:680: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:637: The following error occurred while executing this line:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:705: Tests failed!

Total time: 69 minutes 44 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi02_a

Error Message:
null

Stack Trace:
junit.framework.AssertionFailedError: 
	at org.apache.hadoop.hdfs.TestFiHFlush.runDiskErrorTest(TestFiHFlush.java:56)
	at org.apache.hadoop.hdfs.TestFiHFlush.hFlushFi02_a(TestFiHFlush.java:114)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
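
The "Cannot lock storage ... already locked" failures above are the classic signature of a MiniDFSCluster left running by an earlier test: HDFS guards each storage directory with an in_use.lock file, and a second NameNode formatting the same dfs/name1 directory trips over the first one's lock. Below is a minimal sketch of the usual guard, written against the 0.23-era MiniDFSCluster API visible in these traces; the test.build.data property used to relocate the cluster's storage root is an assumption of the era's conventions, not something shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class IsolatedClusterSketch {
      public static void main(String[] args) throws Exception {
        // Assumed knob: point the cluster at a run-specific storage root so
        // a leaked or concurrent cluster cannot collide on dfs/name1.
        System.setProperty("test.build.data",
            "build/test/data/run-" + System.nanoTime());
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).build();
        try {
          cluster.waitActive();
          // ... test body ...
        } finally {
          cluster.shutdown();  // releases the in_use.lock under the name dirs
        }
      }
    }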


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
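
"Too many open files" thrown from Selector.open() means the JVM hit its file-descriptor limit, so even creating the epoll pipe for a new IPC server fails; on a long junit run this usually points at leaked sockets or streams rather than a genuinely low limit. A small diagnostic sketch a harness could log between test cases to spot the leak early (Linux-only assumption: it reads /proc/self/fd):

    import java.io.File;

    public class OpenFdProbe {
      public static void main(String[] args) {
        // Each entry in /proc/self/fd is one descriptor held by this JVM.
        String[] fds = new File("/proc/self/fd").list();
        System.out.println("open descriptors: "
            + (fds == null ? "unknown" : fds.length));
      }
    }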


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brtf(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)
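
"Mismatched number of datanodes" is raised from Balancer.chooseNodes when the balancer's view of the cluster does not line up with what the test set up; on a loaded slave a common cause is a datanode that never finished registering. A hedged sketch of a pre-flight check a test could make before invoking the balancer; the names follow my reading of the 0.23-era API (getDataNodeStats on DistributedFileSystem is inferred, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public class LiveDatanodePreflight {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(
            new Configuration()).numDataNodes(2).build();
        try {
          cluster.waitActive();  // blocks until the datanodes have registered
          DistributedFileSystem fs =
              (DistributedFileSystem) cluster.getFileSystem();
          DatanodeInfo[] live = fs.getDataNodeStats();
          System.out.println("live datanodes: " + live.length);  // expect 2
        } finally {
          cluster.shutdown();
        }
      }
    }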


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rto(TestBalancer.java:344)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr5x(TestBlockTokenWithDFS.java:529)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of b006f7d16f5860111dd22bf808f8c735 but expecting cf14da03dc722c69735f636c25416575

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of b006f7d16f5860111dd22bf808f8c735 but expecting cf14da03dc722c69735f636c25416575
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4te6(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
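
The TestStorageRestore failure is the secondary NameNode rejecting the checkpoint image it just transferred because its MD5 does not match the digest recorded by the primary, i.e. the fsimage bytes changed in flight or on disk. The check itself is ordinary MD5; the sketch below recomputes a file's digest with the stock JDK so the "expecting" value can be verified by hand (HDFS's own helper for this is not shown in the log, so this is a stand-in, not the production code path):

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.MessageDigest;

    public class FileMd5 {
      public static void main(String[] args) throws Exception {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        InputStream in = new FileInputStream(args[0]);
        try {
          byte[] buf = new byte[8192];
          for (int n; (n = in.read(buf)) > 0; ) {
            md5.update(buf, 0, n);  // stream the file through the digest
          }
        } finally {
          in.close();
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md5.digest()) {
          hex.append(String.format("%02x", b));
        }
        System.out.println(hex);    // compare against the "expecting" value
      }
    }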




Hadoop-Hdfs-trunk - Build # 514 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/514/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 808061 lines...]
    [junit] 	at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:437)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:625)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit] 	at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit] 	at java.lang.Thread.sleep(Native Method)
    [junit] 	at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit] 	... 11 more
    [junit] 2010-12-08 15:43:09,263 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-08 15:43:09,324 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-08 15:43:09,363 INFO  datanode.DataNode (DataNode.java:run(1445)) - DatanodeRegistration(127.0.0.1:46844, storageID=DS-2126632194-127.0.1.1-46844-1291822978276, infoPort=47150, ipcPort=34237):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-08 15:43:09,363 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 34237
    [junit] 2010-12-08 15:43:09,363 INFO  datanode.DataNode (DataNode.java:shutdown(771)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-08 15:43:09,364 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-08 15:43:09,364 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-08 15:43:09,364 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-08 15:43:09,466 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-08 15:43:09,466 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 10 2 
    [junit] 2010-12-08 15:43:09,466 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-08 15:43:09,467 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 60810
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 60810: exiting
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-08 15:43:09,468 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 60810
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 60810: exiting
    [junit] 2010-12-08 15:43:09,469 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 60810: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 78.388 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 250 minutes 9 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
9 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.io.FileInputStream.readBytes(Native Method)
	at java.io.FileInputStream.read(FileInputStream.java:199)
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
	at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:218)
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:258)
	at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
	at sun.security.provider.SeedGenerator$URLSeedGenerator.getSeedByte(SeedGenerator.java:453)
	at sun.security.provider.SeedGenerator.getSeedBytes(SeedGenerator.java:123)
	at sun.security.provider.SeedGenerator.generateSeed(SeedGenerator.java:118)
	at sun.security.provider.SecureRandom.engineGenerateSeed(SecureRandom.java:114)
	at sun.security.provider.SecureRandom.engineNextBytes(SecureRandom.java:171)
	at java.security.SecureRandom.nextBytes(SecureRandom.java:433)
	at java.security.SecureRandom.next(SecureRandom.java:455)
	at java.util.Random.nextLong(Random.java:284)
	at org.mortbay.jetty.servlet.HashSessionIdManager.doStart(HashSessionIdManager.java:139)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.servlet.AbstractSessionManager.doStart(AbstractSessionManager.java:168)
	at org.mortbay.jetty.servlet.HashSessionManager.doStart(HashSessionManager.java:67)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.servlet.SessionHandler.doStart(SessionHandler.java:115)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
	at org.mortbay.jetty.handler.ContextHandler.startContext(ContextHandler.java:537)
	at org.mortbay.jetty.servlet.Context.startContext(Context.java:136)
	at org.mortbay.jetty.webapp.WebAppContext.startContext(WebAppContext.java:1234)
	at org.mortbay.jetty.handler.ContextHandler.doStart(ContextHandler.java:517)
	at org.mortbay.jetty.webapp.WebAppContext.doStart(WebAppContext.java:460)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerCollection.doStart(HandlerCollection.java:152)
	at org.mortbay.jetty.handler.ContextHandlerCollection.doStart(ContextHandlerCollection.java:156)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
	at org.mortbay.jetty.Server.doStart(Server.java:222)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.apache.hadoop.http.HttpServer.start(HttpServer.java:618)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:516)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:461)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:461)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.activate(NameNode.java:405)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:389)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:578)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:571)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1534)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:445)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_21z1ppcxv6(TestFileAppend4.java:151)
	at org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock(TestFileAppend4.java:150)
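
This 60-second timeout dies inside SeedGenerator while Jetty's HashSessionIdManager seeds a SecureRandom for the NameNode HTTP server: on an entropy-starved build slave that first seeding can block for the whole test budget. The probe below makes the stall measurable; the -Djava.security.egd=file:/dev/./urandom JVM flag is the usual workaround, noted here as received wisdom rather than anything this log demonstrates.

    import java.security.SecureRandom;

    public class SeedStallProbe {
      public static void main(String[] args) {
        long start = System.nanoTime();
        new SecureRandom().nextBytes(new byte[16]);  // forces self-seeding
        long elapsedMs = (System.nanoTime() - start) / 1000000L;
        System.out.println("first SecureRandom seeding took "
            + elapsedMs + " ms");
      }
    }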


REGRESSION:  org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_269ddf9xwa(TestFileAppend4.java:222)
	at org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile(TestFileAppend4.java:221)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
	at sun.nio.ch.IOUtil.initPipe(Native Method)
	at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
	at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
	at java.nio.channels.Selector.open(Selector.java:209)
	at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
	at org.apache.hadoop.ipc.Server.<init>(Server.java:1511)
	at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
	at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
	at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
	at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:416)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:507)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1561)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1504)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1471)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brtf(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rto(TestBalancer.java:344)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr5x(TestBlockTokenWithDFS.java:529)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 21ca7add645d906726cbf9a78dbae90f but expecting 8e2f096b8ca2253e32f060fd86240bb9

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 21ca7add645d906726cbf9a78dbae90f but expecting 8e2f096b8ca2253e32f060fd86240bb9
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4te6(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 513 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/513/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 862693 lines...]
    [junit] 2010-12-07 13:18:03,035 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-07 13:18:03,035 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(770)) - Shutting down DataNode 0
    [junit] 2010-12-07 13:18:03,136 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 46659
    [junit] 2010-12-07 13:18:03,137 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 46659: exiting
    [junit] 2010-12-07 13:18:03,137 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 46659
    [junit] 2010-12-07 13:18:03,137 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-07 13:18:03,137 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-07 13:18:03,138 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:36292, storageID=DS-830827849-127.0.1.1-36292-1291727872251, infoPort=37801, ipcPort=46659):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-07 13:18:03,140 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-07 13:18:03,241 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-07 13:18:03,241 INFO  datanode.DataNode (DataNode.java:run(1442)) - DatanodeRegistration(127.0.0.1:36292, storageID=DS-830827849-127.0.1.1-36292-1291727872251, infoPort=37801, ipcPort=46659):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-07 13:18:03,241 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 46659
    [junit] 2010-12-07 13:18:03,241 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-07 13:18:03,242 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-07 13:18:03,242 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-07 13:18:03,243 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-07 13:18:03,345 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-07 13:18:03,345 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-07 13:18:03,345 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 3 
    [junit] 2010-12-07 13:18:03,347 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 51634
    [junit] 2010-12-07 13:18:03,347 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 51634: exiting
    [junit] 2010-12-07 13:18:03,347 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 51634: exiting
    [junit] 2010-12-07 13:18:03,347 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 51634: exiting
    [junit] 2010-12-07 13:18:03,347 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 51634: exiting
    [junit] 2010-12-07 13:18:03,347 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 51634: exiting
    [junit] 2010-12-07 13:18:03,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 51634: exiting
    [junit] 2010-12-07 13:18:03,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 51634: exiting
    [junit] 2010-12-07 13:18:03,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 51634: exiting
    [junit] 2010-12-07 13:18:03,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 51634: exiting
    [junit] 2010-12-07 13:18:03,348 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 51634: exiting
    [junit] 2010-12-07 13:18:03,350 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-07 13:18:03,349 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 51634
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.169 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 102 minutes 24 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
8 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2j2e00jqch(TestBlockReport.java:414)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_08(TestBlockReport.java:390)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot run program "du": java.io.IOException: error=24, Too many open files

Stack Trace:
java.io.IOException: Cannot run program "du": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
	at org.apache.hadoop.util.Shell.run(Shell.java:183)
	at org.apache.hadoop.fs.DU.<init>(DU.java:57)
	at org.apache.hadoop.fs.DU.<init>(DU.java:67)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset$FSVolume.<init>(FSDataset.java:342)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.<init>(FSDataset.java:873)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initFsDataSet(DataNode.java:395)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:500)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1501)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1468)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
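
Here the descriptor exhaustion surfaces one layer lower: the FSVolume disk-usage helper shells out to du at datanode startup, and with error=24 the JVM cannot even fork the child. A stripped-down stand-in for that spawn path is sketched below, just ProcessBuilder plus du; the real org.apache.hadoop.fs.DU class also caches and periodically refreshes the value.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class DuSpawnSketch {
      public static void main(String[] args) throws Exception {
        // Fails with "error=24, Too many open files" once the fd table is full.
        Process p = new ProcessBuilder("du", "-sk", ".").start();
        BufferedReader r = new BufferedReader(
            new InputStreamReader(p.getInputStream()));
        try {
          System.out.println(r.readLine());  // "<kilobytes>\t."
        } finally {
          r.close();
        }
        p.waitFor();
      }
    }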


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:781)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqrf(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brtc(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rtl(TestBalancer.java:344)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr5u(TestBlockTokenWithDFS.java:529)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 28fdb63b6115dea485f4314e9c7cf279 but expecting edebf1962aa94398b10342cbcb30269d

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 28fdb63b6115dea485f4314e9c7cf279 but expecting edebf1962aa94398b10342cbcb30269d
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4te3(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)




Hadoop-Hdfs-trunk - Build # 512 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/512/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 1045 lines...]
ivy-init:
[ivy:configure] :: Ivy 2.1.0 - 20090925235825 :: http://ant.apache.org/ivy/ ::
[ivy:configure] :: loading settings :: file = /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/ivy/ivysettings.xml

ivy-resolve-common:
[ivy:resolve] downloading https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/hadoop-common/0.23.0-SNAPSHOT/hadoop-common-0.23.0-20101203.230226-14.jar ...
[ivy:resolve] .....................................................................................
[ivy:resolve] ..................................................................................................................... (1358kB)
[ivy:resolve] .. (0kB)
[ivy:resolve] 	[SUCCESSFUL ] org.apache.hadoop#hadoop-common;0.23.0-SNAPSHOT!hadoop-common.jar (2193ms)
[ivy:resolve] downloading http://repo1.maven.org/maven2/org/apache/hadoop/avro/1.3.2/avro-1.3.2.jar ...
[ivy:resolve] ...................................................................................................................................................................................................... (331kB)
[ivy:resolve] .. (0kB)
[ivy:resolve] 
[ivy:resolve] :: problems summary ::
[ivy:resolve] :::: WARNINGS
[ivy:resolve] 		[FAILED     ] org.apache.hadoop#avro;1.3.2!avro.jar: invalid sha1: expected=7b6858e308cb0aee4b565442ef05563c9f62fca1 computed=da39a3ee5e6b4b0d3255bfef95601890afd80709 (2177ms)
[ivy:resolve] 		[FAILED     ] org.apache.hadoop#avro;1.3.2!avro.jar:  (0ms)
[ivy:resolve] 	==== apache-snapshot: tried
[ivy:resolve] 	  https://repository.apache.org/content/repositories/snapshots/org/apache/hadoop/avro/1.3.2/avro-1.3.2.jar
[ivy:resolve] 	==== maven2: tried
[ivy:resolve] 	  http://repo1.maven.org/maven2/org/apache/hadoop/avro/1.3.2/avro-1.3.2.jar
[ivy:resolve] 		::::::::::::::::::::::::::::::::::::::::::::::
[ivy:resolve] 		::              FAILED DOWNLOADS            ::
[ivy:resolve] 		:: ^ see resolution messages for details  ^ ::
[ivy:resolve] 		::::::::::::::::::::::::::::::::::::::::::::::
[ivy:resolve] 		:: org.apache.hadoop#avro;1.3.2!avro.jar
[ivy:resolve] 		::::::::::::::::::::::::::::::::::::::::::::::
[ivy:resolve] 
[ivy:resolve] :: USE VERBOSE OR DEBUG MESSAGE LEVEL FOR MORE DETAILS

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:1716: impossible to resolve dependencies:
	resolve failed - see output for details

Total time: 13 seconds


======================================================================
======================================================================
STORE: saving artifacts
======================================================================
======================================================================


mv: cannot stat `build/*.tar.gz': No such file or directory
mv: cannot stat `build/*.jar': No such file or directory
mv: cannot stat `build/test/findbugs': No such file or directory
mv: cannot stat `build/docs/api': No such file or directory
Build Failed
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
No tests ran.
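
The lone failure in this run is the Ivy download above: the "computed" digest da39a3ee5e6b4b0d3255bfef95601890afd80709 is the SHA-1 of zero bytes, so the avro-1.3.2.jar fetch produced an empty file rather than a corrupt one. A few lines of stock JDK code confirm the empty-input digest:

    import java.security.MessageDigest;

    public class EmptySha1 {
      public static void main(String[] args) throws Exception {
        byte[] digest = MessageDigest.getInstance("SHA-1").digest(new byte[0]);
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
          hex.append(String.format("%02x", b));
        }
        System.out.println(hex);  // da39a3ee5e6b4b0d3255bfef95601890afd80709
      }
    }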

Hadoop-Hdfs-trunk - Build # 511 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/511/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 858706 lines...]
    [junit] 2010-12-05 14:29:01,636 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-05 14:29:01,636 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(770)) - Shutting down DataNode 0
    [junit] 2010-12-05 14:29:01,737 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 59672
    [junit] 2010-12-05 14:29:01,738 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 59672: exiting
    [junit] 2010-12-05 14:29:01,738 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-05 14:29:01,738 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-05 14:29:01,738 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 59672
    [junit] 2010-12-05 14:29:01,738 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:39140, storageID=DS-1618396504-127.0.1.1-39140-1291559330818, infoPort=43956, ipcPort=59672):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-05 14:29:01,741 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-05 14:29:01,841 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-05 14:29:01,842 INFO  datanode.DataNode (DataNode.java:run(1442)) - DatanodeRegistration(127.0.0.1:39140, storageID=DS-1618396504-127.0.1.1-39140-1291559330818, infoPort=43956, ipcPort=59672):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-05 14:29:01,842 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 59672
    [junit] 2010-12-05 14:29:01,842 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-05 14:29:01,842 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-05 14:29:01,842 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-05 14:29:01,843 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-05 14:29:01,960 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-05 14:29:01,961 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 9 2 
    [junit] 2010-12-05 14:29:01,961 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-05 14:29:01,962 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40862
    [junit] 2010-12-05 14:29:01,963 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 40862: exiting
    [junit] 2010-12-05 14:29:01,963 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 40862: exiting
    [junit] 2010-12-05 14:29:01,963 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 40862: exiting
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 40862: exiting
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 40862
    [junit] 2010-12-05 14:29:01,963 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 40862: exiting
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 40862: exiting
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 40862: exiting
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 40862: exiting
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 40862: exiting
    [junit] 2010-12-05 14:29:01,966 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 40862: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 53.1 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 173 minutes 25 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
14 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09

Error Message:
Wrong number of PendingReplication blocks expected:<2> but was:<1>

Stack Trace:
junit.framework.AssertionFailedError: Wrong number of PendingReplication blocks expected:<2> but was:<1>
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.__CLR3_0_2fte182qd0(TestBlockReport.java:457)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockReport.blockReport_09(TestBlockReport.java:429)
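
The mismatch above ("expected:<2> but was:<1>") is characteristic of a timing race: the pending-replication count on the NameNode is updated asynchronously as block reports arrive, so a single point-in-time assertion can sample it too early. A minimal poll-until-match guard is sketched below; it is generic, not taken from TestBlockReport, and the current() supplier is a hypothetical hook onto whatever counter the test reads.

    public abstract class WaitForCount {
        /** Supplies the value under test, e.g. a pending-replication count. */
        protected abstract int current() throws Exception;

        /** Polls until the value matches or the timeout elapses. */
        public boolean await(int expected, long timeoutMs) throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (System.currentTimeMillis() < deadline) {
                if (current() == expected) {
                    return true;
                }
                Thread.sleep(100); // back off between samples
            }
            return false;
        }
    }

Asserting only after await(2, ...) returns true tolerates slow block-report delivery without weakening the check.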


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:162)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:170)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:414)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)
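
"Too many open files" (errno EMFILE) means the JVM hit its per-process file-descriptor limit, at which point even unrelated operations, here opening hdfs-default.xml while loading the Configuration, begin to fail; in a long-running test suite the usual cause is streams or sockets leaked by earlier tests. On Linux the live descriptor count of the current process can be read directly from /proc; a small diagnostic sketch (the /proc path is Linux-specific):

    import java.io.File;

    public class FdCount {
        public static void main(String[] args) {
            // On Linux, /proc/self/fd holds one entry per descriptor
            // currently open in this process.
            String[] fds = new File("/proc/self/fd").list();
            System.out.println("open file descriptors: "
                + (fds == null ? "unavailable" : String.valueOf(fds.length)));
        }
    }

Comparing that count against the shell limit ("ulimit -n") before and after a suspect test shows quickly whether descriptors are leaking.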


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1523)
	at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1388)
	at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1334)
	at org.apache.hadoop.conf.Configuration.set(Configuration.java:577)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:162)
	at org.apache.hadoop.fs.FileSystem.setDefaultUri(FileSystem.java:170)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:414)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
	at java.io.FileInputStream.open(Native Method)
	at java.io.FileInputStream.<init>(FileInputStream.java:106)
	at java.io.FileInputStream.<init>(FileInputStream.java:66)
	at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
	at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
	at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
	at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
	at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
	at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
	at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
	at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
	at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
	at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1437)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/VERSION (Too many open files)

Stack Trace:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/data/data1/current/VERSION (Too many open files)
	at java.io.RandomAccessFile.open(Native Method)
	at java.io.RandomAccessFile.<init>(RandomAccessFile.java:212)
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.write(Storage.java:265)
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.write(Storage.java:259)
	at org.apache.hadoop.hdfs.server.common.Storage.writeAll(Storage.java:800)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.register(DataNode.java:696)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.runDatanodeDaemon(DataNode.java:1452)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:628)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqrd(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brta(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rtj(TestBalancer.java:344)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr5s(TestBlockTokenWithDFS.java:529)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of e91554ca5aaf4323460b6d01e7367bb6 but expecting 5d5cafa90118feaa71a5770cbfd403a5

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of e91554ca5aaf4323460b6d01e7367bb6 but expecting 5d5cafa90118feaa71a5770cbfd403a5
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4te1(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
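
As the message itself states, the checkpoint path computes an MD5 digest of the fsimage file and compares it with the digest it expects for that image; a mismatch means the file was truncated or altered on disk or in transit. The shape of that check is sketched below as a self-contained helper (illustrative only, not the FSImage code):

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;

    public class ImageMd5 {
        /** Streams a file through an MD5 digest and returns the hex string. */
        public static String md5Of(String path) throws Exception {
            MessageDigest md = MessageDigest.getInstance("MD5");
            InputStream in = new DigestInputStream(new FileInputStream(path), md);
            try {
                byte[] buf = new byte[8192];
                while (in.read(buf) != -1) {
                    // reading drives the digest; the bytes are discarded
                }
            } finally {
                in.close();
            }
            StringBuilder hex = new StringBuilder();
            for (byte b : md.digest()) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();
        }
    }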


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2lttiju10x9(TestBlockRecovery.java:165)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas(TestBlockRecovery.java:153)
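
This NullPointerException, repeated across all six TestBlockRecovery cases below, originates at one line inside DataNode.syncBlock rather than in the tests themselves. One common source of such failures in mock-based tests is an un-stubbed mock: a Mockito mock returns null for reference types by default, and the NPE then surfaces wherever that null is first dereferenced, far from the mock itself. A generic illustration (not the actual TestBlockRecovery setup):

    import static org.mockito.Mockito.mock;

    import java.util.List;

    public class NullFromMock {
        @SuppressWarnings("unchecked")
        public static void main(String[] args) {
            List<String> mocked = mock(List.class);
            String s = mocked.get(0);        // null: get(0) was never stubbed
            System.out.println(s.length());  // NullPointerException fires here
        }
    }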


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2c2lg1h10xr(TestBlockRecovery.java:204)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas(TestBlockRecovery.java:190)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_29tewcb10ya(TestBlockRecovery.java:243)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas(TestBlockRecovery.java:229)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2cqk51310yt(TestBlockRecovery.java:281)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas(TestBlockRecovery.java:269)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2396azp10z6(TestBlockRecovery.java:305)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas(TestBlockRecovery.java:293)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2ahdlbx10zi(TestBlockRecovery.java:329)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas(TestBlockRecovery.java:317)




Hadoop-Hdfs-trunk - Build # 510 - Still Failing

Posted by Apache Hudson Server <hu...@hudson.apache.org>.
See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/510/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 881713 lines...]
    [junit] 2010-12-04 15:38:20,789 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-04 15:38:20,789 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-04 15:38:20,790 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(770)) - Shutting down DataNode 0
    [junit] 2010-12-04 15:38:20,891 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40370
    [junit] 2010-12-04 15:38:20,892 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 40370: exiting
    [junit] 2010-12-04 15:38:20,892 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 40370
    [junit] 2010-12-04 15:38:20,892 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-04 15:38:20,892 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:58092, storageID=DS-1741088443-127.0.1.1-58092-1291477089867, infoPort=45819, ipcPort=40370):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit] 	at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit] 	at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit] 	at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit] 	at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit] 	at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2010-12-04 15:38:20,892 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2010-12-04 15:38:20,894 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2010-12-04 15:38:20,993 INFO  datanode.DataNode (DataNode.java:run(1442)) - DatanodeRegistration(127.0.0.1:58092, storageID=DS-1741088443-127.0.1.1-58092-1291477089867, infoPort=45819, ipcPort=40370):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2010-12-04 15:38:20,993 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 40370
    [junit] 2010-12-04 15:38:20,993 INFO  datanode.DataNode (DataNode.java:shutdown(768)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2010-12-04 15:38:20,993 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2010-12-04 15:38:20,994 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2010-12-04 15:38:20,994 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2010-12-04 15:38:21,103 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2822)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-04 15:38:21,103 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(631)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 3 
    [junit] 2010-12-04 15:38:21,103 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2010-12-04 15:38:21,104 INFO  ipc.Server (Server.java:stop(1611)) - Stopping server on 52691
    [junit] 2010-12-04 15:38:21,105 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 0 on 52691: exiting
    [junit] 2010-12-04 15:38:21,105 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 3 on 52691: exiting
    [junit] 2010-12-04 15:38:21,105 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 1 on 52691: exiting
    [junit] 2010-12-04 15:38:21,106 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 7 on 52691: exiting
    [junit] 2010-12-04 15:38:21,106 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 9 on 52691: exiting
    [junit] 2010-12-04 15:38:21,106 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 2 on 52691: exiting
    [junit] 2010-12-04 15:38:21,106 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 52691
    [junit] 2010-12-04 15:38:21,105 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 5 on 52691: exiting
    [junit] 2010-12-04 15:38:21,105 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2010-12-04 15:38:21,106 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 4 on 52691: exiting
    [junit] 2010-12-04 15:38:21,106 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 6 on 52691: exiting
    [junit] 2010-12-04 15:38:21,106 INFO  ipc.Server (Server.java:run(1444)) - IPC Server handler 8 on 52691: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.136 sec

checkfailure:

run-test-hdfs-all-withtestcaseonly:

run-test-hdfs:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:724: Tests failed!

Total time: 242 minutes 44 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
15 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
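
The lock that fails here is an OS-level exclusive lock on a marker file inside the storage directory, so "already locked" means some other process, or a NameNode left running by an earlier test, still holds it. The underlying pattern looks roughly like the sketch below (the marker-file name is assumed for illustration, not quoted from the Hadoop source):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    public class DirLock {
        /** Tries to take an exclusive lock on a marker file inside dir. */
        public static FileLock tryLockDir(File dir) throws IOException {
            File marker = new File(dir, "in_use.lock"); // name assumed
            RandomAccessFile raf = new RandomAccessFile(marker, "rws");
            FileLock lock = raf.getChannel().tryLock();
            if (lock == null) { // another process holds the lock
                raf.close();
                throw new IOException("Cannot lock storage " + dir
                    + ". The directory is already locked.");
            }
            return lock;
        }
    }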


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock

Error Message:
test timed out after 60000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 60000 milliseconds
	at java.io.FileInputStream.readBytes(Native Method)
	at java.io.FileInputStream.read(FileInputStream.java:199)
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:256)
	at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:218)
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:258)
	at java.io.BufferedInputStream.read(BufferedInputStream.java:317)
	at sun.security.provider.SeedGenerator$URLSeedGenerator.getSeedByte(SeedGenerator.java:453)
	at sun.security.provider.SeedGenerator.getSeedBytes(SeedGenerator.java:123)
	at sun.security.provider.SeedGenerator.generateSeed(SeedGenerator.java:118)
	at sun.security.provider.SecureRandom.engineGenerateSeed(SecureRandom.java:114)
	at sun.security.provider.SecureRandom.engineNextBytes(SecureRandom.java:171)
	at java.security.SecureRandom.nextBytes(SecureRandom.java:433)
	at java.security.SecureRandom.next(SecureRandom.java:455)
	at java.util.Random.nextLong(Random.java:284)
	at org.mortbay.jetty.servlet.HashSessionIdManager.doStart(HashSessionIdManager.java:139)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.servlet.AbstractSessionManager.doStart(AbstractSessionManager.java:168)
	at org.mortbay.jetty.servlet.HashSessionManager.doStart(HashSessionManager.java:67)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.servlet.SessionHandler.doStart(SessionHandler.java:115)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
	at org.mortbay.jetty.handler.ContextHandler.startContext(ContextHandler.java:537)
	at org.mortbay.jetty.servlet.Context.startContext(Context.java:136)
	at org.mortbay.jetty.webapp.WebAppContext.startContext(WebAppContext.java:1234)
	at org.mortbay.jetty.handler.ContextHandler.doStart(ContextHandler.java:517)
	at org.mortbay.jetty.webapp.WebAppContext.doStart(WebAppContext.java:460)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerCollection.doStart(HandlerCollection.java:152)
	at org.mortbay.jetty.handler.ContextHandlerCollection.doStart(ContextHandlerCollection.java:156)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.mortbay.jetty.handler.HandlerWrapper.doStart(HandlerWrapper.java:130)
	at org.mortbay.jetty.Server.doStart(Server.java:222)
	at org.mortbay.component.AbstractLifeCycle.start(AbstractLifeCycle.java:50)
	at org.apache.hadoop.http.HttpServer.start(HttpServer.java:618)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:516)
	at org.apache.hadoop.hdfs.server.namenode.NameNode$1.run(NameNode.java:461)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:396)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:461)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.activate(NameNode.java:405)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:389)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:578)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:571)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1534)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:445)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_21z1ppcxv2(TestFileAppend4.java:151)
	at org.apache.hadoop.hdfs.TestFileAppend4.testRecoverFinalizedBlock(TestFileAppend4.java:150)
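
Note where the 60 seconds went: Jetty's HashSessionIdManager seeds a SecureRandom during NameNode HTTP-server startup, and on a busy headless build slave the seed read (the SeedGenerator frames above) can block until the kernel gathers enough entropy. A quick probe of seeding cost is sketched below; the common mitigation is to point the JVM at the non-blocking device, e.g. -Djava.security.egd=file:/dev/./urandom.

    import java.security.SecureRandom;

    public class SeedProbe {
        public static void main(String[] args) {
            long t0 = System.nanoTime();
            // getSeed() pulls from the configured entropy source and may
            // block when that source is /dev/random and entropy is scarce.
            byte[] seed = SecureRandom.getSeed(20);
            System.out.printf("seeded %d bytes in %.1f ms%n",
                seed.length, (System.nanoTime() - t0) / 1e6);
        }
    }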


FAILED:  org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
	at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1333)
	at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1351)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
	at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:435)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileAppend4.__CLR3_0_269ddf9xw6(TestFileAppend4.java:222)
	at org.apache.hadoop.hdfs.TestFileAppend4.testCompleteOtherLeaseHoldersFile(TestFileAppend4.java:221)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Cannot run program "du": java.io.IOException: error=24, Too many open files

Stack Trace:
java.io.IOException: Cannot run program "du": java.io.IOException: error=24, Too many open files
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
	at org.apache.hadoop.util.Shell.runCommand(Shell.java:201)
	at org.apache.hadoop.util.Shell.run(Shell.java:183)
	at org.apache.hadoop.fs.DU.<init>(DU.java:57)
	at org.apache.hadoop.fs.DU.<init>(DU.java:67)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset$FSVolume.<init>(FSDataset.java:342)
	at org.apache.hadoop.hdfs.server.datanode.FSDataset.<init>(FSDataset.java:873)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.initFsDataSet(DataNode.java:395)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:500)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:281)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:263)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1558)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1501)
	at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1468)
	at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:614)
	at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:448)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:176)
	at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:71)
	at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:168)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tqrd(TestFileConcurrentReader.java:275)
	at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
	at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
	at java.lang.ProcessImpl.start(ProcessImpl.java:65)
	at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_29j3j5brta(TestBalancer.java:327)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer0(TestBalancer.java:324)


FAILED:  org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancerDefaultConstructor(TestBalancer.java:279)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancerDefaultConstructor(TestBalancer.java:376)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.__CLR3_0_2g13gq9rtj(TestBalancer.java:344)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.testBalancer2(TestBalancer.java:341)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End

Error Message:
Mismatched number of datanodes

Stack Trace:
junit.framework.AssertionFailedError: Mismatched number of datanodes
	at org.apache.hadoop.hdfs.server.balancer.Balancer.chooseNodes(Balancer.java:1069)
	at org.apache.hadoop.hdfs.server.balancer.Balancer.run(Balancer.java:1511)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.runBalancer(TestBalancer.java:247)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.test(TestBalancer.java:234)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.oneNodeTest(TestBalancer.java:307)
	at org.apache.hadoop.hdfs.server.balancer.TestBalancer.integrationTest(TestBalancer.java:319)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.__CLR3_0_2wspf0nr5s(TestBlockTokenWithDFS.java:529)
	at org.apache.hadoop.hdfs.server.namenode.TestBlockTokenWithDFS.testEnd2End(TestBlockTokenWithDFS.java:526)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 29cc2291aee45a8dd8536da2a53e4c6b but expecting 377fa58678572b777c126b6cddc5e0c5

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 29cc2291aee45a8dd8536da2a53e4c6b but expecting 377fa58678572b777c126b6cddc5e0c5
	at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
	at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4te1(TestStorageRestore.java:316)
	at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2lttiju10x9(TestBlockRecovery.java:165)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedReplicas(TestBlockRecovery.java:153)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2c2lg1h10xr(TestBlockRecovery.java:204)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRbwReplicas(TestBlockRecovery.java:190)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_29tewcb10ya(TestBlockRecovery.java:243)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testFinalizedRwrReplicas(TestBlockRecovery.java:229)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2cqk51310yt(TestBlockRecovery.java:281)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBWReplicas(TestBlockRecovery.java:269)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2396azp10z6(TestBlockRecovery.java:305)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRBW_RWRReplicas(TestBlockRecovery.java:293)


FAILED:  org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas

Error Message:
null

Stack Trace:
java.lang.NullPointerException
	at org.apache.hadoop.hdfs.server.datanode.DataNode.syncBlock(DataNode.java:1883)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testSyncReplicas(TestBlockRecovery.java:144)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.__CLR3_0_2ahdlbx10zi(TestBlockRecovery.java:329)
	at org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery.testRWRReplicas(TestBlockRecovery.java:317)